summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2021-02-07 14:54:51 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2021-02-07 14:54:51 +0000
commit7cdb5ea308ef8b4f72996d8f4ef96b5c6286a374 (patch)
tree6b9a497a34d1247dbee3643d8155eede2233fb6d
parentInitial commit. (diff)
downloadnetdata-go.d.plugin-upstream.tar.xz
netdata-go.d.plugin-upstream.zip
Adding upstream version 0.27.0.upstream/0.27.0upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--.circleci/config.yml64
-rw-r--r--.codecov.yml29
-rw-r--r--.dockerignore4
-rw-r--r--.github/CODEOWNERS3
-rw-r--r--.github/labeler.yml42
-rw-r--r--.github/workflows/codeql-analysis.yml71
-rw-r--r--.github/workflows/labeler.yml16
-rw-r--r--.github/workflows/reviewdog.yml25
-rw-r--r--.gitignore5
-rw-r--r--.travis.yml23
-rwxr-xr-x.travis/netdata_sync.sh51
-rwxr-xr-x.travis/publisher.sh40
-rw-r--r--.yamllint.yml10
-rw-r--r--CODE_OF_CONDUCT.md3
-rw-r--r--Dockerfile.dev22
-rw-r--r--LICENSE674
-rw-r--r--Makefile63
-rw-r--r--README.md141
-rw-r--r--agent/README.md157
-rw-r--r--agent/agent.go206
-rw-r--r--agent/agent_test.go106
-rw-r--r--agent/job/build/build.go355
-rw-r--r--agent/job/build/build_test.go107
-rw-r--r--agent/job/build/cache.go137
-rw-r--r--agent/job/build/cache_test.go134
-rw-r--r--agent/job/confgroup/group.go86
-rw-r--r--agent/job/confgroup/group_test.go322
-rw-r--r--agent/job/confgroup/registry.go21
-rw-r--r--agent/job/confgroup/registry_test.go42
-rw-r--r--agent/job/discovery/cache.go36
-rw-r--r--agent/job/discovery/dummy/discovery.go84
-rw-r--r--agent/job/discovery/dummy/discovery_test.go110
-rw-r--r--agent/job/discovery/file/discovery.go106
-rw-r--r--agent/job/discovery/file/discovery_test.go23
-rw-r--r--agent/job/discovery/file/parse.go131
-rw-r--r--agent/job/discovery/file/parse_test.go400
-rw-r--r--agent/job/discovery/file/read.go79
-rw-r--r--agent/job/discovery/file/read_test.go97
-rw-r--r--agent/job/discovery/file/sim_test.go129
-rw-r--r--agent/job/discovery/file/watch.go221
-rw-r--r--agent/job/discovery/file/watch_test.go350
-rw-r--r--agent/job/discovery/manager.go196
-rw-r--r--agent/job/discovery/manager_test.go175
-rw-r--r--agent/job/discovery/sim_test.go66
-rw-r--r--agent/job/job.go13
-rw-r--r--agent/job/mock.go82
-rw-r--r--agent/job/mock_test.go79
-rw-r--r--agent/job/registry/registry.go47
-rw-r--r--agent/job/registry/registry_test.go95
-rw-r--r--agent/job/run/run.go99
-rw-r--r--agent/job/run/run_test.go13
-rw-r--r--agent/job/state/state.go159
-rw-r--r--agent/job/state/state_test.go156
-rw-r--r--agent/module/charts.go440
-rw-r--r--agent/module/charts_test.go378
-rw-r--r--agent/module/job.go467
-rw-r--r--agent/module/job_test.go288
-rw-r--r--agent/module/mock.go53
-rw-r--r--agent/module/mock_test.go52
-rw-r--r--agent/module/module.go35
-rw-r--r--agent/module/registry.go43
-rw-r--r--agent/module/registry_test.go32
-rw-r--r--agent/netdataapi/api.go101
-rw-r--r--agent/netdataapi/api_test.go151
-rw-r--r--agent/setup.go201
-rw-r--r--agent/setup_test.go207
-rw-r--r--agent/testdata/agent-empty.conf0
-rw-r--r--agent/testdata/agent-invalid-syntax.conf7
-rw-r--r--agent/testdata/agent-valid.conf7
-rw-r--r--agent/ticker/ticker.go53
-rw-r--r--agent/ticker/ticket_test.go47
-rw-r--r--cli/cli.go40
-rw-r--r--cmd/godplugin/main.go129
-rw-r--r--config/go.d.conf74
-rw-r--r--config/go.d/activemq.conf190
-rw-r--r--config/go.d/apache.conf161
-rw-r--r--config/go.d/bind.conf170
-rw-r--r--config/go.d/cockroachdb.conf161
-rw-r--r--config/go.d/consul.conf179
-rw-r--r--config/go.d/coredns.conf184
-rw-r--r--config/go.d/couchbase.conf96
-rw-r--r--config/go.d/couchdb.conf173
-rw-r--r--config/go.d/dns_query.conf118
-rw-r--r--config/go.d/dnsdist.conf165
-rw-r--r--config/go.d/dnsmasq.conf100
-rw-r--r--config/go.d/dnsmasq_dhcp.conf103
-rw-r--r--config/go.d/docker_engine.conf158
-rw-r--r--config/go.d/dockerhub.conf164
-rw-r--r--config/go.d/elasticsearch.conf186
-rw-r--r--config/go.d/energid.conf163
-rw-r--r--config/go.d/example.conf94
-rw-r--r--config/go.d/filecheck.conf109
-rw-r--r--config/go.d/fluentd.conf167
-rw-r--r--config/go.d/freeradius.conf101
-rw-r--r--config/go.d/hdfs.conf160
-rw-r--r--config/go.d/httpcheck.conf176
-rw-r--r--config/go.d/isc_dhcpd.conf111
-rw-r--r--config/go.d/k8s_kubelet.conf159
-rw-r--r--config/go.d/k8s_kubeproxy.conf156
-rw-r--r--config/go.d/lighttpd.conf161
-rw-r--r--config/go.d/lighttpd2.conf161
-rw-r--r--config/go.d/logstash.conf161
-rw-r--r--config/go.d/mysql.conf136
-rw-r--r--config/go.d/nginx.conf161
-rw-r--r--config/go.d/nginxvts.conf160
-rw-r--r--config/go.d/openvpn.conf115
-rw-r--r--config/go.d/phpdaemon.conf158
-rw-r--r--config/go.d/phpfpm.conf169
-rw-r--r--config/go.d/pihole.conf134
-rw-r--r--config/go.d/pika.conf111
-rw-r--r--config/go.d/portcheck.conf99
-rw-r--r--config/go.d/powerdns.conf165
-rw-r--r--config/go.d/powerdns_recursor.conf161
-rw-r--r--config/go.d/prometheus.conf1512
-rw-r--r--config/go.d/pulsar.conf174
-rw-r--r--config/go.d/rabbitmq.conf167
-rw-r--r--config/go.d/redis.conf120
-rw-r--r--config/go.d/scaleio.conf148
-rw-r--r--config/go.d/solr.conf161
-rw-r--r--config/go.d/springboot2.conf173
-rw-r--r--config/go.d/squidlog.conf133
-rw-r--r--config/go.d/systemdunits.conf101
-rw-r--r--config/go.d/tengine.conf161
-rw-r--r--config/go.d/unbound.conf136
-rw-r--r--config/go.d/vcsa.conf134
-rw-r--r--config/go.d/vernemq.conf158
-rw-r--r--config/go.d/vsphere.conf174
-rw-r--r--config/go.d/web_log.conf222
-rw-r--r--config/go.d/whoisquery.conf97
-rw-r--r--config/go.d/wmi.conf143
-rw-r--r--config/go.d/x509check.conf132
-rw-r--r--config/go.d/zookeeper.conf119
-rw-r--r--docker-compose.yml54
-rw-r--r--docs/how-to-write-a-module.md259
-rw-r--r--examples/simple/main.go123
-rw-r--r--go.mod36
-rw-r--r--go.sum268
-rwxr-xr-xhack/go-build.sh80
-rwxr-xr-xhack/go-fmt.sh5
-rw-r--r--logger/countwatcher.go77
-rw-r--r--logger/countwatcher_test.go57
-rw-r--r--logger/formatter.go198
-rw-r--r--logger/formatter_test.go32
-rw-r--r--logger/logger.go191
-rw-r--r--logger/logger_test.go212
-rw-r--r--logger/severity.go58
-rw-r--r--logger/static.go99
-rw-r--r--mocks/blackbox/Dockerfile3
-rw-r--r--mocks/conf.d/go.d.conf16
-rw-r--r--mocks/conf.d/go.d/apache.conf6
-rw-r--r--mocks/conf.d/go.d/example.conf2
-rw-r--r--mocks/conf.d/go.d/logstash.conf3
-rw-r--r--mocks/conf.d/go.d/springboot2.conf8
-rw-r--r--mocks/conf.d/go.d/web_log.conf30
-rw-r--r--mocks/httpd/httpd.conf92
-rw-r--r--mocks/netdata/netdata.conf12
-rw-r--r--mocks/springboot2/.gitignore4
-rw-r--r--mocks/springboot2/Dockerfile12
-rw-r--r--mocks/springboot2/build.gradle33
-rw-r--r--mocks/springboot2/settings.gradle2
-rw-r--r--mocks/springboot2/src/main/java/hello/Main.java23
-rw-r--r--mocks/springboot2/src/main/resources/application.properties1
-rw-r--r--mocks/tmp/.gitkeep0
-rw-r--r--modules/activemq/README.md71
-rw-r--r--modules/activemq/activemq.go308
-rw-r--r--modules/activemq/activemq_test.go327
-rw-r--r--modules/activemq/apiclient.go135
-rw-r--r--modules/activemq/charts.go44
-rw-r--r--modules/apache/README.md75
-rw-r--r--modules/apache/apache.go120
-rw-r--r--modules/apache/apache_test.go187
-rw-r--r--modules/apache/apiclient.go186
-rw-r--r--modules/apache/charts.go132
-rw-r--r--modules/apache/collect.go23
-rw-r--r--modules/apache/metrics.go59
-rw-r--r--modules/apache/testdata/extended-status.txt39
-rw-r--r--modules/apache/testdata/lighttpd-status.txt6
-rw-r--r--modules/apache/testdata/simple-status.txt24
-rw-r--r--modules/bind/README.md109
-rw-r--r--modules/bind/bind.go304
-rw-r--r--modules/bind/bind_test.go517
-rw-r--r--modules/bind/charts.go194
-rw-r--r--modules/bind/json_client.go81
-rw-r--r--modules/bind/testdata/query-server.json302
-rw-r--r--modules/bind/testdata/query-server.xml470
-rw-r--r--modules/bind/xml3_client.go131
-rw-r--r--modules/cockroachdb/README.md172
-rw-r--r--modules/cockroachdb/charts.go848
-rw-r--r--modules/cockroachdb/cockroachdb.go114
-rw-r--r--modules/cockroachdb/cockroachdb_test.go317
-rw-r--r--modules/cockroachdb/collect.go158
-rw-r--r--modules/cockroachdb/metrics.go374
-rw-r--r--modules/cockroachdb/testdata/metrics.txt2952
-rw-r--r--modules/cockroachdb/testdata/non_cockroachdb.txt27
-rw-r--r--modules/consul/README.md64
-rw-r--r--modules/consul/apiclient.go92
-rw-r--r--modules/consul/charts.go31
-rw-r--r--modules/consul/consul.go190
-rw-r--r--modules/consul/consul_test.go133
-rw-r--r--modules/consul/testdata/checks.txt75
-rw-r--r--modules/coredns/README.md89
-rw-r--r--modules/coredns/charts.go324
-rw-r--r--modules/coredns/collect.go648
-rw-r--r--modules/coredns/coredns.go124
-rw-r--r--modules/coredns/coredns_test.go501
-rw-r--r--modules/coredns/metrics.go109
-rw-r--r--modules/coredns/testdata/no_load.txt3
-rw-r--r--modules/coredns/testdata/some_load.txt177
-rw-r--r--modules/couchbase/README.md80
-rw-r--r--modules/couchbase/charts.go82
-rw-r--r--modules/couchbase/collect.go151
-rw-r--r--modules/couchbase/couchbase.go99
-rw-r--r--modules/couchbase/couchbase_test.go233
-rw-r--r--modules/couchbase/dev.md11
-rw-r--r--modules/couchbase/init.go37
-rw-r--r--modules/couchbase/metrics.go31
-rw-r--r--modules/couchbase/testdata/6.6.0/buckets_basic_stats.json422
-rw-r--r--modules/couchdb/README.md78
-rw-r--r--modules/couchdb/charts.go226
-rw-r--r--modules/couchdb/collect.go243
-rw-r--r--modules/couchdb/couchdb.go110
-rw-r--r--modules/couchdb/couchdb_test.go457
-rw-r--r--modules/couchdb/init.go64
-rw-r--r--modules/couchdb/metrics.go198
-rw-r--r--modules/couchdb/testdata/v3.1.1/active_tasks.json63
-rw-r--r--modules/couchdb/testdata/v3.1.1/dbs_info.json52
-rw-r--r--modules/couchdb/testdata/v3.1.1/node_stats.json1651
-rw-r--r--modules/couchdb/testdata/v3.1.1/node_system.json176
-rw-r--r--modules/couchdb/testdata/v3.1.1/root.json16
-rw-r--r--modules/dnsdist/README.md87
-rw-r--r--modules/dnsdist/charts.go149
-rw-r--r--modules/dnsdist/collect.go73
-rw-r--r--modules/dnsdist/dev.md81
-rw-r--r--modules/dnsdist/dnsdist.go98
-rw-r--r--modules/dnsdist/dnsdist_test.go257
-rw-r--r--modules/dnsdist/init.go29
-rw-r--r--modules/dnsdist/metrics.go39
-rw-r--r--modules/dnsdist/testdata/v1.5.1/jsonstat.json56
-rw-r--r--modules/dnsmasq/README.md81
-rw-r--r--modules/dnsmasq/charts.go51
-rw-r--r--modules/dnsmasq/collect.go139
-rw-r--r--modules/dnsmasq/dev.md23
-rw-r--r--modules/dnsmasq/dnsmasq.go101
-rw-r--r--modules/dnsmasq/dnsmasq_test.go260
-rw-r--r--modules/dnsmasq/init.go41
-rw-r--r--modules/dnsmasq_dhcp/README.md74
-rw-r--r--modules/dnsmasq_dhcp/autodetection.go146
-rw-r--r--modules/dnsmasq_dhcp/charts.go65
-rw-r--r--modules/dnsmasq_dhcp/collect.go104
-rw-r--r--modules/dnsmasq_dhcp/dhcp.go118
-rw-r--r--modules/dnsmasq_dhcp/dhcp_test.go128
-rw-r--r--modules/dnsmasq_dhcp/find.go249
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.conf77
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf1
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any10
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any10
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any10
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any10
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf1
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak1
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any10
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any3
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other1
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf10
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf10
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq.leases19
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq2.conf6
-rw-r--r--modules/dnsmasq_dhcp/testdata/dnsmasq3.conf4
-rw-r--r--modules/dnsquery/README.md64
-rw-r--r--modules/dnsquery/charts.go20
-rw-r--r--modules/dnsquery/dnsquery.go238
-rw-r--r--modules/dnsquery/dnsquery_test.go123
-rw-r--r--modules/dnsquery/worker.go53
-rw-r--r--modules/docker_engine/README.md77
-rw-r--r--modules/docker_engine/charts.go134
-rw-r--r--modules/docker_engine/collect.go210
-rw-r--r--modules/docker_engine/docker_engine.go115
-rw-r--r--modules/docker_engine/docker_engine_test.go361
-rw-r--r--modules/docker_engine/metrics.go72
-rw-r--r--modules/docker_engine/testdata/non-docker-engine.txt0
-rw-r--r--modules/docker_engine/testdata/v17.05.0-ce.txt460
-rw-r--r--modules/docker_engine/testdata/v18.09.3-ce-swarm.txt468
-rw-r--r--modules/docker_engine/testdata/v18.09.3-ce.txt465
-rw-r--r--modules/dockerhub/README.md69
-rw-r--r--modules/dockerhub/apiclient.go82
-rw-r--r--modules/dockerhub/charts.go88
-rw-r--r--modules/dockerhub/collect.go63
-rw-r--r--modules/dockerhub/dockerhub.go106
-rw-r--r--modules/dockerhub/dockerhub_test.go144
-rw-r--r--modules/dockerhub/testdata/repo1.txt22
-rw-r--r--modules/dockerhub/testdata/repo2.txt22
-rw-r--r--modules/dockerhub/testdata/repo3.txt22
-rw-r--r--modules/elasticsearch/README.md138
-rw-r--r--modules/elasticsearch/charts.go581
-rw-r--r--modules/elasticsearch/collect.go302
-rw-r--r--modules/elasticsearch/elasticsearch.go112
-rw-r--r--modules/elasticsearch/elasticsearch_test.go626
-rw-r--r--modules/elasticsearch/init.go55
-rw-r--r--modules/elasticsearch/metrics.go262
-rw-r--r--modules/elasticsearch/testdata/v7.9.0/cat_indices_stats.json50
-rw-r--r--modules/elasticsearch/testdata/v7.9.0/cluster_health.json17
-rw-r--r--modules/elasticsearch/testdata/v7.9.0/cluster_stats.json197
-rw-r--r--modules/elasticsearch/testdata/v7.9.0/info.json17
-rw-r--r--modules/elasticsearch/testdata/v7.9.0/nodes_local_stats.json824
-rw-r--r--modules/energid/README.md71
-rw-r--r--modules/energid/charts.go95
-rw-r--r--modules/energid/collect.go160
-rw-r--r--modules/energid/dev.md34
-rw-r--r--modules/energid/energid.go97
-rw-r--r--modules/energid/energid_test.go282
-rw-r--r--modules/energid/init.go29
-rw-r--r--modules/energid/jsonrpc.go46
-rw-r--r--modules/energid/metrics.go47
-rw-r--r--modules/energid/testdata/v2.4.1/getblockchaininfo.json66
-rw-r--r--modules/energid/testdata/v2.4.1/getmemoryinfo.json14
-rw-r--r--modules/energid/testdata/v2.4.1/getmempoolinfo.json11
-rw-r--r--modules/energid/testdata/v2.4.1/getnetworkinfo.json41
-rw-r--r--modules/energid/testdata/v2.4.1/gettxoutsetinfo.json13
-rw-r--r--modules/example/README.md73
-rw-r--r--modules/example/charts.go40
-rw-r--r--modules/example/collect.go45
-rw-r--r--modules/example/example.go96
-rw-r--r--modules/example/example_test.go328
-rw-r--r--modules/example/init.go41
-rw-r--r--modules/filecheck/README.md131
-rw-r--r--modules/filecheck/charts.go77
-rw-r--r--modules/filecheck/collect.go35
-rw-r--r--modules/filecheck/collect_dirs.go182
-rw-r--r--modules/filecheck/collect_files.go146
-rw-r--r--modules/filecheck/filecheck.go105
-rw-r--r--modules/filecheck/filecheck_test.go343
-rw-r--r--modules/filecheck/init.go40
-rw-r--r--modules/filecheck/testdata/dir/empty_file.log0
-rw-r--r--modules/filecheck/testdata/dir/file.log61
-rw-r--r--modules/filecheck/testdata/dir/subdir/empty_file.log0
-rw-r--r--modules/filecheck/testdata/empty_file.log0
-rw-r--r--modules/filecheck/testdata/file.log42
-rw-r--r--modules/fluentd/README.md77
-rw-r--r--modules/fluentd/apiclient.go100
-rw-r--r--modules/fluentd/charts.go35
-rw-r--r--modules/fluentd/fluentd.go162
-rw-r--r--modules/fluentd/fluentd_test.go113
-rw-r--r--modules/fluentd/testdata/plugins.json101
-rw-r--r--modules/freeradius/README.md86
-rw-r--r--modules/freeradius/api/client.go172
-rw-r--r--modules/freeradius/api/client_test.go150
-rw-r--r--modules/freeradius/api/dictionary.go2681
-rw-r--r--modules/freeradius/charts.go137
-rw-r--r--modules/freeradius/collect.go14
-rw-r--r--modules/freeradius/freeradius.go103
-rw-r--r--modules/freeradius/freeradius_test.go186
-rw-r--r--modules/hdfs/README.md108
-rw-r--r--modules/hdfs/charts.go326
-rw-r--r--modules/hdfs/client.go68
-rw-r--r--modules/hdfs/collect.go216
-rw-r--r--modules/hdfs/hdfs.go148
-rw-r--r--modules/hdfs/hdfs_test.go304
-rw-r--r--modules/hdfs/metrics.go243
-rw-r--r--modules/hdfs/testdata/datanode.json165
-rw-r--r--modules/hdfs/testdata/namenode.json132
-rw-r--r--modules/hdfs/testdata/unknownnode.json34
-rw-r--r--modules/httpcheck/README.md78
-rw-r--r--modules/httpcheck/charts.go65
-rw-r--r--modules/httpcheck/collect.go149
-rw-r--r--modules/httpcheck/httpcheck.go125
-rw-r--r--modules/httpcheck/httpcheck_test.go267
-rw-r--r--modules/httpcheck/metrics.go20
-rw-r--r--modules/init.go62
-rw-r--r--modules/isc_dhcpd/README.md85
-rw-r--r--modules/isc_dhcpd/charts.go33
-rw-r--r--modules/isc_dhcpd/collect.go87
-rw-r--r--modules/isc_dhcpd/init.go88
-rw-r--r--modules/isc_dhcpd/isc_dhcpd.go96
-rw-r--r--modules/isc_dhcpd/isc_dhcpd_test.go328
-rw-r--r--modules/isc_dhcpd/parse.go90
-rw-r--r--modules/isc_dhcpd/testdata/dhcpd.leases_empty0
-rw-r--r--modules/isc_dhcpd/testdata/dhcpd.leases_ipv4370
-rw-r--r--modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup39
-rw-r--r--modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive370
-rw-r--r--modules/isc_dhcpd/testdata/dhcpd.leases_ipv667
-rw-r--r--modules/k8s_kubelet/README.md83
-rw-r--r--modules/k8s_kubelet/charts.go234
-rw-r--r--modules/k8s_kubelet/collect.go319
-rw-r--r--modules/k8s_kubelet/kubelet.go106
-rw-r--r--modules/k8s_kubelet/kubelet_test.go202
-rw-r--r--modules/k8s_kubelet/metrics.go111
-rw-r--r--modules/k8s_kubelet/testdata/metrics.txt574
-rw-r--r--modules/k8s_kubelet/testdata/token.txt1
-rw-r--r--modules/k8s_kubeproxy/README.md68
-rw-r--r--modules/k8s_kubeproxy/charts.go106
-rw-r--r--modules/k8s_kubeproxy/collect.go144
-rw-r--r--modules/k8s_kubeproxy/kubeproxy.go102
-rw-r--r--modules/k8s_kubeproxy/kubeproxy_test.go125
-rw-r--r--modules/k8s_kubeproxy/metrics.go52
-rw-r--r--modules/k8s_kubeproxy/testdata/metrics.txt190
-rw-r--r--modules/lighttpd/README.md71
-rw-r--r--modules/lighttpd/apiclient.go169
-rw-r--r--modules/lighttpd/charts.go78
-rw-r--r--modules/lighttpd/collect.go23
-rw-r--r--modules/lighttpd/lighttpd.go96
-rw-r--r--modules/lighttpd/lighttpd_test.go144
-rw-r--r--modules/lighttpd/metrics.go31
-rw-r--r--modules/lighttpd/testdata/apache-status.txt39
-rw-r--r--modules/lighttpd/testdata/status.txt6
-rw-r--r--modules/lighttpd2/README.md72
-rw-r--r--modules/lighttpd2/apiclient.go152
-rw-r--r--modules/lighttpd2/charts.go94
-rw-r--r--modules/lighttpd2/collect.go23
-rw-r--r--modules/lighttpd2/lighttpd2.go95
-rw-r--r--modules/lighttpd2/lighttpd2_test.go128
-rw-r--r--modules/lighttpd2/metrics.go35
-rw-r--r--modules/lighttpd2/testdata/status.txt34
-rw-r--r--modules/logstash/README.md71
-rw-r--r--modules/logstash/charts.go196
-rw-r--r--modules/logstash/client.go137
-rw-r--r--modules/logstash/collect.go54
-rw-r--r--modules/logstash/logstash.go110
-rw-r--r--modules/logstash/logstash_test.go186
-rw-r--r--modules/logstash/testdata/stats.json252
-rw-r--r--modules/mysql/README.md165
-rw-r--r--modules/mysql/charts.go801
-rw-r--r--modules/mysql/collect.go116
-rw-r--r--modules/mysql/collect_global_status.go234
-rw-r--r--modules/mysql/collect_global_vars.go52
-rw-r--r--modules/mysql/collect_slave_status.go115
-rw-r--r--modules/mysql/collect_user_statistics.go74
-rw-r--r--modules/mysql/collect_version.go38
-rw-r--r--modules/mysql/dev.md27
-rw-r--r--modules/mysql/mycnf.go77
-rw-r--r--modules/mysql/mycnf_test.go99
-rw-r--r--modules/mysql/mysql.go136
-rw-r--r--modules/mysql/mysql_test.go954
-rw-r--r--modules/mysql/testdata/mariadb/v10.5.4/all_slaves_status.txt124
-rw-r--r--modules/mysql/testdata/mariadb/v10.5.4/global_status.txt1860
-rw-r--r--modules/mysql/testdata/mariadb/v10.5.4/global_variables.txt1944
-rw-r--r--modules/mysql/testdata/mariadb/v10.5.4/user_statistics.txt52
-rw-r--r--modules/mysql/testdata/mariadb/v10.5.4/version.txt2
-rw-r--r--modules/mysql/testdata/mariadb/v5.5.46/global_status.txt1239
-rw-r--r--modules/mysql/testdata/mariadb/v5.5.46/global_variables.txt1257
-rw-r--r--modules/mysql/testdata/mariadb/v5.5.46/slave_status.txt41
-rw-r--r--modules/mysql/testdata/mariadb/v5.5.46/version.txt2
-rw-r--r--modules/mysql/testdata/mysql/v8.0.21/global_status.txt1416
-rw-r--r--modules/mysql/testdata/mysql/v8.0.21/global_variables.txt1728
-rw-r--r--modules/mysql/testdata/mysql/v8.0.21/slave_status.txt122
-rw-r--r--modules/mysql/testdata/mysql/v8.0.21/version.txt2
-rw-r--r--modules/nginx/README.md71
-rw-r--r--modules/nginx/apiclient.go167
-rw-r--r--modules/nginx/charts.go56
-rw-r--r--modules/nginx/collect.go15
-rw-r--r--modules/nginx/metrics.go32
-rw-r--r--modules/nginx/nginx.go93
-rw-r--r--modules/nginx/nginx_test.go139
-rw-r--r--modules/nginx/testdata/status.txt4
-rw-r--r--modules/nginx/testdata/tengine-status.txt4
-rw-r--r--modules/nginxvts/README.md76
-rw-r--r--modules/nginxvts/charts.go128
-rw-r--r--modules/nginxvts/collect.go80
-rw-r--r--modules/nginxvts/dev.md45
-rw-r--r--modules/nginxvts/init.go45
-rw-r--r--modules/nginxvts/metrics.go51
-rw-r--r--modules/nginxvts/nginxvts.go95
-rw-r--r--modules/nginxvts/nginxvts_test.go259
-rw-r--r--modules/nginxvts/testdata/vts-v0.1.18.json44
-rw-r--r--modules/openvpn/README.md82
-rw-r--r--modules/openvpn/charts.go60
-rw-r--r--modules/openvpn/client/client.go238
-rw-r--r--modules/openvpn/client/client_test.go170
-rw-r--r--modules/openvpn/client/commands.go36
-rw-r--r--modules/openvpn/client/testdata/load-stats.txt1
-rw-r--r--modules/openvpn/client/testdata/status3.txt77
-rw-r--r--modules/openvpn/client/testdata/version.txt3
-rw-r--r--modules/openvpn/client/types.go26
-rw-r--r--modules/openvpn/collect.go95
-rw-r--r--modules/openvpn/openvpn.go139
-rw-r--r--modules/openvpn/openvpn_test.go143
-rw-r--r--modules/phpdaemon/README.md142
-rw-r--r--modules/phpdaemon/charts.go64
-rw-r--r--modules/phpdaemon/client.go76
-rw-r--r--modules/phpdaemon/collect.go17
-rw-r--r--modules/phpdaemon/metrics.go31
-rw-r--r--modules/phpdaemon/phpdaemon.go117
-rw-r--r--modules/phpdaemon/phpdaemon_test.go138
-rw-r--r--modules/phpdaemon/testdata/fullstatus.json10
-rw-r--r--modules/phpfpm/README.md79
-rw-r--r--modules/phpfpm/charts.go82
-rw-r--r--modules/phpfpm/client.go145
-rw-r--r--modules/phpfpm/collect.go70
-rw-r--r--modules/phpfpm/decode.go130
-rw-r--r--modules/phpfpm/init.go38
-rw-r--r--modules/phpfpm/phpfpm.go75
-rw-r--r--modules/phpfpm/phpfpm_test.go277
-rw-r--r--modules/phpfpm/testdata/status-full-no-idle.json63
-rw-r--r--modules/phpfpm/testdata/status-full.json63
-rw-r--r--modules/phpfpm/testdata/status-full.txt59
-rw-r--r--modules/phpfpm/testdata/status.json16
-rw-r--r--modules/phpfpm/testdata/status.txt14
-rw-r--r--modules/pihole/README.md93
-rw-r--r--modules/pihole/charts.go311
-rw-r--r--modules/pihole/client/client.go262
-rw-r--r--modules/pihole/client/client_test.go299
-rw-r--r--modules/pihole/client/testdata/getForwardDestinations.json8
-rw-r--r--modules/pihole/client/testdata/getQueryTypes.json11
-rw-r--r--modules/pihole/client/testdata/summary.json27
-rw-r--r--modules/pihole/client/testdata/summaryRaw.json27
-rw-r--r--modules/pihole/client/testdata/topClients.json5
-rw-r--r--modules/pihole/client/testdata/topItems.json8
-rw-r--r--modules/pihole/client/types.go129
-rw-r--r--modules/pihole/collect.go179
-rw-r--r--modules/pihole/metrics.go36
-rw-r--r--modules/pihole/pihole.go141
-rw-r--r--modules/pihole/pihole_test.go276
-rw-r--r--modules/pihole/testdata/setupVars.conf11
-rw-r--r--modules/pihole/webpassword.go66
-rw-r--r--modules/pika/README.md99
-rw-r--r--modules/pika/charts.go244
-rw-r--r--modules/pika/collect.go69
-rw-r--r--modules/pika/collect_info.go217
-rw-r--r--modules/pika/dev.md22
-rw-r--r--modules/pika/init.go45
-rw-r--r--modules/pika/pika.go113
-rw-r--r--modules/pika/pika_test.go291
-rw-r--r--modules/pika/testdata/redis/info_all.txt165
-rw-r--r--modules/pika/testdata/v3.4.0/info_all.txt64
-rw-r--r--modules/portcheck/README.md69
-rw-r--r--modules/portcheck/charts.go62
-rw-r--r--modules/portcheck/collect.go71
-rw-r--r--modules/portcheck/portcheck.go126
-rw-r--r--modules/portcheck/portcheck_test.go152
-rw-r--r--modules/powerdns/README.md85
-rw-r--r--modules/powerdns/authoritativens.go93
-rw-r--r--modules/powerdns/authoritativens_test.go332
-rw-r--r--modules/powerdns/charts.go64
-rw-r--r--modules/powerdns/collect.go100
-rw-r--r--modules/powerdns/dev.md40
-rw-r--r--modules/powerdns/init.go27
-rw-r--r--modules/powerdns/metrics.go11
-rw-r--r--modules/powerdns/testdata/recursor/statistics.json587
-rw-r--r--modules/powerdns/testdata/v4.3.0/statistics.json507
-rw-r--r--modules/powerdns_recursor/README.md84
-rw-r--r--modules/powerdns_recursor/charts.go96
-rw-r--r--modules/powerdns_recursor/collect.go100
-rw-r--r--modules/powerdns_recursor/dev.md28
-rw-r--r--modules/powerdns_recursor/init.go27
-rw-r--r--modules/powerdns_recursor/metrics.go16
-rw-r--r--modules/powerdns_recursor/recursor.go93
-rw-r--r--modules/powerdns_recursor/recursor_test.go367
-rw-r--r--modules/powerdns_recursor/testdata/authoritative/statistics.json507
-rw-r--r--modules/powerdns_recursor/testdata/v4.3.1/statistics.json587
-rw-r--r--modules/prometheus/README.md207
-rw-r--r--modules/prometheus/cache.go127
-rw-r--r--modules/prometheus/charts.go169
-rw-r--r--modules/prometheus/collect.go99
-rw-r--r--modules/prometheus/collect_gauge_counter.go73
-rw-r--r--modules/prometheus/collect_histogram.go96
-rw-r--r--modules/prometheus/collect_summary.go83
-rw-r--r--modules/prometheus/collect_unknown.go17
-rw-r--r--modules/prometheus/group.go161
-rw-r--r--modules/prometheus/init.go89
-rw-r--r--modules/prometheus/prometheus.go117
-rw-r--r--modules/prometheus/prometheus_test.go1159
-rw-r--r--modules/pulsar/README.md169
-rw-r--r--modules/pulsar/charts.go661
-rw-r--r--modules/pulsar/collect.go136
-rw-r--r--modules/pulsar/metrics.go114
-rw-r--r--modules/pulsar/pulsar.go153
-rw-r--r--modules/pulsar/pulsar_test.go1013
-rw-r--r--modules/pulsar/testdata/non-pulsar.txt27
-rw-r--r--modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt500
-rw-r--r--modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt748
-rw-r--r--modules/pulsar/testdata/standalone-v2.5.0-topics.txt748
-rw-r--r--modules/rabbitmq/README.md81
-rw-r--r--modules/rabbitmq/charts.go197
-rw-r--r--modules/rabbitmq/client.go184
-rw-r--r--modules/rabbitmq/collect.go125
-rw-r--r--modules/rabbitmq/rabbitmq.go109
-rw-r--r--modules/rabbitmq/rabbitmq_test.go226
-rw-r--r--modules/rabbitmq/testdata/node.json143
-rw-r--r--modules/rabbitmq/testdata/overview.json74
-rw-r--r--modules/rabbitmq/testdata/vhosts.json138
-rw-r--r--modules/redis/README.md95
-rw-r--r--modules/redis/charts.go248
-rw-r--r--modules/redis/collect.go69
-rw-r--r--modules/redis/collect_info.go215
-rw-r--r--modules/redis/dev.md21
-rw-r--r--modules/redis/init.go45
-rw-r--r--modules/redis/redis.go113
-rw-r--r--modules/redis/redis_test.go378
-rw-r--r--modules/redis/testdata/pika/info_all.txt67
-rw-r--r--modules/redis/testdata/v6.0.9/info_all.txt165
-rw-r--r--modules/scaleio/README.md117
-rw-r--r--modules/scaleio/charts.go463
-rw-r--r--modules/scaleio/client/client.go315
-rw-r--r--modules/scaleio/client/client_test.go140
-rw-r--r--modules/scaleio/client/server.go147
-rw-r--r--modules/scaleio/client/types.go1094
-rw-r--r--modules/scaleio/collect.go56
-rw-r--r--modules/scaleio/collect_sdc.go36
-rw-r--r--modules/scaleio/collect_storage_pool.go39
-rw-r--r--modules/scaleio/collect_system.go248
-rw-r--r--modules/scaleio/metrics.go124
-rw-r--r--modules/scaleio/queries.go109
-rw-r--r--modules/scaleio/scaleio.go116
-rw-r--r--modules/scaleio/scaleio_test.go373
-rw-r--r--modules/scaleio/testdata/instances.json1160
-rw-r--r--modules/scaleio/testdata/selected_statistics.json777
-rw-r--r--modules/solr/README.md79
-rw-r--r--modules/solr/charts.go139
-rw-r--r--modules/solr/parser.go149
-rw-r--r--modules/solr/solr.go208
-rw-r--r--modules/solr/solr_test.go272
-rw-r--r--modules/solr/testdata/core-metrics-v6.txt794
-rw-r--r--modules/solr/testdata/core-metrics-v7.txt732
-rw-r--r--modules/springboot2/README.md80
-rw-r--r--modules/springboot2/charts.go75
-rw-r--r--modules/springboot2/springboot2.go183
-rw-r--r--modules/springboot2/springboot2_test.go101
-rw-r--r--modules/springboot2/tests/testdata.txt194
-rw-r--r--modules/springboot2/tests/testdata2.txt193
-rw-r--r--modules/squidlog/README.md167
-rw-r--r--modules/squidlog/charts.go366
-rw-r--r--modules/squidlog/collect.go358
-rw-r--r--modules/squidlog/init.go72
-rw-r--r--modules/squidlog/logline.go394
-rw-r--r--modules/squidlog/logline_test.go473
-rw-r--r--modules/squidlog/metrics.go91
-rw-r--r--modules/squidlog/squidlog.go102
-rw-r--r--modules/squidlog/squidlog_test.go333
-rw-r--r--modules/squidlog/testdata/access.log500
-rw-r--r--modules/squidlog/testdata/unknown.log1
-rw-r--r--modules/systemdunits/README.md99
-rw-r--r--modules/systemdunits/charts.go88
-rw-r--r--modules/systemdunits/client.go29
-rw-r--r--modules/systemdunits/collect.go215
-rw-r--r--modules/systemdunits/doc.go2
-rw-r--r--modules/systemdunits/init.go31
-rw-r--r--modules/systemdunits/systemdunits.go104
-rw-r--r--modules/systemdunits/systemdunits_test.go528
-rw-r--r--modules/tengine/README.md75
-rw-r--r--modules/tengine/apiclient.go246
-rw-r--r--modules/tengine/charts.go116
-rw-r--r--modules/tengine/collect.go20
-rw-r--r--modules/tengine/metrics.go73
-rw-r--r--modules/tengine/tengine.go95
-rw-r--r--modules/tengine/tengine_test.go133
-rw-r--r--modules/tengine/testdata/status.txt3
-rw-r--r--modules/unbound/README.md164
-rw-r--r--modules/unbound/charts.go519
-rw-r--r--modules/unbound/client.go119
-rw-r--r--modules/unbound/client_test.go116
-rw-r--r--modules/unbound/collect.go196
-rw-r--r--modules/unbound/config/config.go76
-rw-r--r--modules/unbound/config/config_test.go170
-rw-r--r--modules/unbound/config/parse.go162
-rw-r--r--modules/unbound/config/parse_test.go91
-rw-r--r--modules/unbound/config/testdata/infinite_rec.conf85
-rw-r--r--modules/unbound/config/testdata/non_existent_glob_include.conf85
-rw-r--r--modules/unbound/config/testdata/non_existent_include.conf85
-rw-r--r--modules/unbound/config/testdata/valid_glob.conf82
-rw-r--r--modules/unbound/config/testdata/valid_glob2.conf80
-rw-r--r--modules/unbound/config/testdata/valid_glob3.conf81
-rw-r--r--modules/unbound/config/testdata/valid_include.conf82
-rw-r--r--modules/unbound/config/testdata/valid_include2.conf81
-rw-r--r--modules/unbound/config/testdata/valid_include3.conf81
-rw-r--r--modules/unbound/config/testdata/valid_include_toplevel.conf82
-rw-r--r--modules/unbound/config/testdata/valid_include_toplevel2.conf81
-rw-r--r--modules/unbound/config/testdata/valid_include_toplevel3.conf81
-rw-r--r--modules/unbound/init.go107
-rw-r--r--modules/unbound/testdata/stats/common.txt66
-rw-r--r--modules/unbound/testdata/stats/extended.txt162
-rw-r--r--modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt162
-rw-r--r--modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt162
-rw-r--r--modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt163
-rw-r--r--modules/unbound/testdata/stats/lifecycle/reset/extended1.txt163
-rw-r--r--modules/unbound/testdata/stats/lifecycle/reset/extended2.txt156
-rw-r--r--modules/unbound/testdata/stats/lifecycle/reset/extended3.txt163
-rw-r--r--modules/unbound/testdata/unbound.conf85
-rw-r--r--modules/unbound/testdata/unbound_disabled.conf85
-rw-r--r--modules/unbound/testdata/unbound_empty.conf85
-rw-r--r--modules/unbound/unbound.go108
-rw-r--r--modules/unbound/unbound_test.go1268
-rw-r--r--modules/vcsa/README.md107
-rw-r--r--modules/vcsa/charts.go48
-rw-r--r--modules/vcsa/client/client.go212
-rw-r--r--modules/vcsa/client/client_test.go286
-rw-r--r--modules/vcsa/collect.go166
-rw-r--r--modules/vcsa/vcsa.go143
-rw-r--r--modules/vcsa/vcsa_test.go246
-rw-r--r--modules/vernemq/README.md173
-rw-r--r--modules/vernemq/charts.go916
-rw-r--r--modules/vernemq/collect.go294
-rw-r--r--modules/vernemq/metrics.go148
-rw-r--r--modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt416
-rw-r--r--modules/vernemq/testdata/non_vernemq.txt27
-rw-r--r--modules/vernemq/vernemq.go108
-rw-r--r--modules/vernemq/vernemq_test.go566
-rw-r--r--modules/vsphere/README.md173
-rw-r--r--modules/vsphere/charts.go439
-rw-r--r--modules/vsphere/client/client.go178
-rw-r--r--modules/vsphere/client/client_test.go173
-rw-r--r--modules/vsphere/client/keepalive.go43
-rw-r--r--modules/vsphere/collect.go214
-rw-r--r--modules/vsphere/discover.go28
-rw-r--r--modules/vsphere/discover/build.go178
-rw-r--r--modules/vsphere/discover/discover.go161
-rw-r--r--modules/vsphere/discover/discover_test.go177
-rw-r--r--modules/vsphere/discover/filter.go58
-rw-r--r--modules/vsphere/discover/hierarchy.go98
-rw-r--r--modules/vsphere/discover/metric_lists.go133
-rw-r--r--modules/vsphere/match/match.go230
-rw-r--r--modules/vsphere/match/match_test.go285
-rw-r--r--modules/vsphere/metrics.txt328
-rw-r--r--modules/vsphere/resources/resources.go135
-rw-r--r--modules/vsphere/scrape/scrape.go157
-rw-r--r--modules/vsphere/scrape/scrape_test.go68
-rw-r--r--modules/vsphere/scrape/throttled_caller.go31
-rw-r--r--modules/vsphere/scrape/throttled_caller_test.go40
-rw-r--r--modules/vsphere/task.go59
-rw-r--r--modules/vsphere/task_test.go39
-rw-r--r--modules/vsphere/vsphere.go199
-rw-r--r--modules/vsphere/vsphere_test.go491
-rw-r--r--modules/weblog/README.md344
-rw-r--r--modules/weblog/charts.go852
-rw-r--r--modules/weblog/collect.go546
-rw-r--r--modules/weblog/init.go137
-rw-r--r--modules/weblog/logline.go604
-rw-r--r--modules/weblog/logline_test.go664
-rw-r--r--modules/weblog/metrics.go162
-rw-r--r--modules/weblog/parser.go165
-rw-r--r--modules/weblog/parser_test.go222
-rw-r--r--modules/weblog/testdata/common.log500
-rw-r--r--modules/weblog/testdata/custom.log100
-rw-r--r--modules/weblog/testdata/custom_time_fields.log72
-rw-r--r--modules/weblog/testdata/full.log500
-rw-r--r--modules/weblog/weblog.go143
-rw-r--r--modules/weblog/weblog_test.go1224
-rw-r--r--modules/whoisquery/README.md69
-rw-r--r--modules/whoisquery/charts.go28
-rw-r--r--modules/whoisquery/collect.go19
-rw-r--r--modules/whoisquery/provider.go49
-rw-r--r--modules/whoisquery/whoisquery.go89
-rw-r--r--modules/whoisquery/whoisquery_test.go135
-rw-r--r--modules/wmi/README.md121
-rw-r--r--modules/wmi/charts.go776
-rw-r--r--modules/wmi/collect.go60
-rw-r--r--modules/wmi/collect_collection.go49
-rw-r--r--modules/wmi/collect_cpu.go140
-rw-r--r--modules/wmi/collect_logical_disk.go90
-rw-r--r--modules/wmi/collect_logon.go65
-rw-r--r--modules/wmi/collect_memory.go172
-rw-r--r--modules/wmi/collect_net.go107
-rw-r--r--modules/wmi/collect_os.go84
-rw-r--r--modules/wmi/collect_system.go67
-rw-r--r--modules/wmi/metrics.go285
-rw-r--r--modules/wmi/testdata/full.txt478
-rw-r--r--modules/wmi/testdata/partial.txt315
-rw-r--r--modules/wmi/wmi.go119
-rw-r--r--modules/wmi/wmi_test.go558
-rw-r--r--modules/x509check/README.md82
-rw-r--r--modules/x509check/charts.go57
-rw-r--r--modules/x509check/collect.go50
-rw-r--r--modules/x509check/provider.go129
-rw-r--r--modules/x509check/x509check.go104
-rw-r--r--modules/x509check/x509check_test.go159
-rw-r--r--modules/zookeeper/README.md76
-rw-r--r--modules/zookeeper/charts.go109
-rw-r--r--modules/zookeeper/client.go161
-rw-r--r--modules/zookeeper/client_test.go100
-rw-r--r--modules/zookeeper/collect.go73
-rw-r--r--modules/zookeeper/testdata/mntr.txt416
-rw-r--r--modules/zookeeper/testdata/mntr_notinwhitelist.txt1
-rw-r--r--modules/zookeeper/zookeeper.go106
-rw-r--r--modules/zookeeper/zookeeper_test.go163
-rw-r--r--pkg/README.md15
-rw-r--r--pkg/iprange/README.md29
-rw-r--r--pkg/iprange/parse.go136
-rw-r--r--pkg/iprange/parse_test.go256
-rw-r--r--pkg/iprange/pool.go38
-rw-r--r--pkg/iprange/pool_test.go102
-rw-r--r--pkg/iprange/range.go98
-rw-r--r--pkg/iprange/range_test.go198
-rw-r--r--pkg/logs/csv.go170
-rw-r--r--pkg/logs/csv_test.go173
-rw-r--r--pkg/logs/json.go85
-rw-r--r--pkg/logs/json_test.go198
-rw-r--r--pkg/logs/lastline.go57
-rw-r--r--pkg/logs/lastline_test.go53
-rw-r--r--pkg/logs/ltsv.go63
-rw-r--r--pkg/logs/ltsv_test.go113
-rw-r--r--pkg/logs/parser.go60
-rw-r--r--pkg/logs/parser_test.go1
-rw-r--r--pkg/logs/reader.go191
-rw-r--r--pkg/logs/reader_test.go244
-rw-r--r--pkg/logs/regexp.go71
-rw-r--r--pkg/logs/regexp_test.go129
-rw-r--r--pkg/matcher/README.md133
-rw-r--r--pkg/matcher/cache.go54
-rw-r--r--pkg/matcher/cache_test.go51
-rw-r--r--pkg/matcher/doc.go37
-rw-r--r--pkg/matcher/doc_test.go47
-rw-r--r--pkg/matcher/expr.go60
-rw-r--r--pkg/matcher/expr_test.go98
-rw-r--r--pkg/matcher/glob.go264
-rw-r--r--pkg/matcher/glob_test.go95
-rw-r--r--pkg/matcher/logical.go99
-rw-r--r--pkg/matcher/logical_test.go95
-rw-r--r--pkg/matcher/matcher.go146
-rw-r--r--pkg/matcher/matcher_test.go120
-rw-r--r--pkg/matcher/regexp.go58
-rw-r--r--pkg/matcher/regexp_test.go64
-rw-r--r--pkg/matcher/simple_patterns.go63
-rw-r--r--pkg/matcher/simple_patterns_test.go86
-rw-r--r--pkg/matcher/string.go46
-rw-r--r--pkg/matcher/string_test.go60
-rw-r--r--pkg/metrics/counter.go91
-rw-r--r--pkg/metrics/counter_test.go103
-rw-r--r--pkg/metrics/gauge.go101
-rw-r--r--pkg/metrics/gauge_test.go127
-rw-r--r--pkg/metrics/histogram.go143
-rw-r--r--pkg/metrics/histogram_test.go134
-rw-r--r--pkg/metrics/metrics.go10
-rw-r--r--pkg/metrics/summary.go122
-rw-r--r--pkg/metrics/summary_test.go76
-rw-r--r--pkg/metrics/unique_counter.go107
-rw-r--r--pkg/metrics/unique_counter_test.go143
-rw-r--r--pkg/multipath/multipath.go57
-rw-r--r--pkg/multipath/multipath_test.go37
-rw-r--r--pkg/multipath/testdata/test-empty.conf0
-rw-r--r--pkg/multipath/testdata/test.conf1
-rw-r--r--pkg/prometheus/metrics.go182
-rw-r--r--pkg/prometheus/metrics_test.go154
-rw-r--r--pkg/prometheus/prometheus.go161
-rw-r--r--pkg/prometheus/prometheus_test.go129
-rw-r--r--pkg/prometheus/selector/README.md94
-rw-r--r--pkg/prometheus/selector/expr.go60
-rw-r--r--pkg/prometheus/selector/expr_test.go229
-rw-r--r--pkg/prometheus/selector/logical.go47
-rw-r--r--pkg/prometheus/selector/logical_test.go224
-rw-r--r--pkg/prometheus/selector/parse.go95
-rw-r--r--pkg/prometheus/selector/parse_test.go115
-rw-r--r--pkg/prometheus/selector/selector.go50
-rw-r--r--pkg/prometheus/selector/selector_test.go9
-rw-r--r--pkg/prometheus/tests/testdata.nometa.txt410
-rw-r--r--pkg/prometheus/tests/testdata.txt528
-rw-r--r--pkg/stm/README.md72
-rw-r--r--pkg/stm/stm.go171
-rw-r--r--pkg/stm/stm_test.go413
-rw-r--r--pkg/tlscfg/README.md53
-rw-r--r--pkg/tlscfg/config.go75
-rw-r--r--pkg/tlscfg/config_test.go8
-rw-r--r--pkg/web/README.md90
-rw-r--r--pkg/web/client.go76
-rw-r--r--pkg/web/client_test.go21
-rw-r--r--pkg/web/doc.go7
-rw-r--r--pkg/web/doc_test.go13
-rw-r--r--pkg/web/duration.go37
-rw-r--r--pkg/web/duration_test.go22
-rw-r--r--pkg/web/request.go81
-rw-r--r--pkg/web/request_test.go178
-rw-r--r--pkg/web/web.go9
859 files changed, 143062 insertions, 0 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000..64b07b5
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,64 @@
+version: 2
+jobs:
+ compile:
+ docker:
+ - image: circleci/golang:1.15
+ steps:
+ - checkout
+ - restore_cache:
+ keys:
+ - go_mod-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
+ - run: go get -t -v -d ./...
+ - save_cache:
+ key: go_mod-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
+ paths:
+ - /go/pkg/mod
+ - run: CGO_ENABLED=0 go build -o /tmp/godplugin github.com/netdata/go.d.plugin/cmd/godplugin
+ - run: /tmp/godplugin --help || true
+ - store_artifacts:
+ path: /tmp/godplugin
+ vet:
+ docker:
+ - image: circleci/golang:1.15
+ steps:
+ - checkout
+ - restore_cache:
+ keys:
+ - go_mod-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
+ - run: go vet ./...
+ fmt:
+ docker:
+ - image: circleci/golang:1.15
+ steps:
+ - checkout
+ - restore_cache:
+ keys:
+ - go_mod-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
+ - run:
+ name: "Enforce Go Formatted Code"
+ command: "! go fmt ./... 2>&1 | read"
+ test:
+ docker:
+ - image: circleci/golang:1.15
+ steps:
+ - checkout
+ - restore_cache:
+ keys:
+ - go_mod-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
+ - run: go test ./... -coverprofile=coverage.txt -race -cover -covermode=atomic
+ - run: bash <(curl -s https://codecov.io/bash)
+
+workflows:
+ version: 2
+ build_and_test:
+ jobs:
+ - compile
+ - vet:
+ requires:
+ - compile
+ - fmt:
+ requires:
+ - compile
+ - test:
+ requires:
+ - compile
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 0000000..54cda9e
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,29 @@
+codecov:
+ notify:
+ require_ci_to_pass: yes
+
+coverage:
+ precision: 2
+ round: down
+ range: "70...100"
+
+ status:
+ project: true
+ patch: yes
+ changes: no
+
+parsers:
+ gcov:
+ branch_detection:
+ conditional: yes
+ loop: yes
+ method: no
+ macro: no
+
+comment:
+ layout: "header"
+ behavior: default
+ require_changes: true
+
+ignore:
+ - "modules/freeradius/api/dictionary.go"
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..d79e4ae
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,4 @@
+Makefile
+/hack
+docker-compose.yml
+/mocks/tmp \ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..8eb8084
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,3 @@
+* @ilyam8
+
+*.md @ilyam8 @joelhans
diff --git a/.github/labeler.yml b/.github/labeler.yml
new file mode 100644
index 0000000..d106699
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,42 @@
+# This configures label matching for PR's.
+#
+# The keys are labels, and the values are lists of minimatch patterns
+# to which those labels apply.
+#
+# NOTE: This can only add labels, not remove them.
+# NOTE: Due to YAML syntax limitations, patterns or labels which start
+# with a character that is part of the standard YAML syntax must be
+# quoted.
+#
+# Please keep the labels sorted and deduplicated.
+
+area/agent:
+ - agent/**
+ - cli/**
+ - logger/**
+
+area/build:
+ - hack/**
+ - Makefile
+
+area/ci:
+ - .circleci/**
+ - .github/**
+ - .travis/**
+ - .codecov.yml
+ - .travis.yml
+ - .yamllint.yml
+
+area/conf:
+ - config/**
+
+area/docs:
+ - "**/*.md"
+ - "*.md"
+
+area/modules:
+ - modules/**
+ - config/go.d/*.conf
+
+area/pkg:
+ - pkg/**
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000..ea4ab02
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,71 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+name: "CodeQL"
+
+on:
+ push:
+ branches: [master]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [master]
+ schedule:
+ - cron: '0 11 * * 4'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ # Override automatic language detection by changing the below list
+ # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python']
+ language: ['go']
+ # Learn more...
+ # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+ with:
+ # We must fetch at least the immediate parents so that if this is
+ # a pull request then we can checkout the head.
+ fetch-depth: 2
+
+ # If this run was triggered by a pull request event, then checkout
+ # the head of the pull request instead of the merge commit.
+ - run: git checkout HEAD^2
+ if: ${{ github.event_name == 'pull_request' }}
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually (see below)
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v1
+
+ # ℹī¸ Command-line programs to run using the OS shell.
+ # 📚 https://git.io/JvXDl
+
+ # ✏ī¸ If the Autobuild fails above, remove it and uncomment the following three lines
+ # and modify them (or add more) to build your code if your project
+ # uses a compiled language
+
+ #- run: |
+ # make bootstrap
+ # make release
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
new file mode 100644
index 0000000..7ed11d6
--- /dev/null
+++ b/.github/workflows/labeler.yml
@@ -0,0 +1,16 @@
+---
+# Handles labelling of PR's.
+name: Pull Request Labeler
+on:
+ schedule:
+ - cron: '*/10 * * * *'
+jobs:
+ labeler:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: docker://docker.io/ilyam8/periodic-pr-labeler:v0.1.1
+ if: github.repository == 'netdata/go.d.plugin'
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_REPOSITORY: ${{ github.repository }}
+ LABEL_MAPPINGS_FILE: .github/labeler.yml
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
new file mode 100644
index 0000000..5f36e49
--- /dev/null
+++ b/.github/workflows/reviewdog.yml
@@ -0,0 +1,25 @@
+name: reviewdog
+on: [pull_request]
+jobs:
+ golangci-lint:
+ name: golangci-lint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Run golangci-lint
+ uses: reviewdog/action-golangci-lint@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-check
+ yamllint:
+ name: yamllint
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Run yamllint
+ uses: reviewdog/action-yamllint@v1
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ reporter: github-pr-check
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..9f00ddf
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+.idea
+bin/
+mocks/springboot2/.gradle/
+mocks/tmp/*
+!mocks/tmp/.gitkeep \ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..30c784a
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,23 @@
+---
+language: go
+go:
+ - '1.15'
+
+env:
+ - GO111MODULE=on
+
+stages:
+- build
+- name: release
+ if: tag =~ ^v
+
+jobs:
+ include:
+ - stage: build
+ name: lint & test & build
+ script: make all
+ - stage: release
+ name: publish release artifacts
+ script:
+ - make release
+ - .travis/publisher.sh
diff --git a/.travis/netdata_sync.sh b/.travis/netdata_sync.sh
new file mode 100755
index 0000000..d46ad99
--- /dev/null
+++ b/.travis/netdata_sync.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# This is the go.d.plugin->netdata version sync script.
+# It ingests the updated go.d.plugin version information to netdata repository
+#
+# Execution Requirements:
+# - hub command
+# - GITHUB_TOKEN variable set with GitHub token
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Pawel Krupa (@paulfantom)
+# Author: Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+
+set -e
+
+if [ ! -f .gitignore ]; then
+ echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
+ exit 1
+fi
+
+if [ -z ${TRAVIS_TAG+x} ]; then
+ exit 1
+fi
+
+CHECKSUM_FILE="$(pwd)/bin/sha256sums.txt"
+NETDATA_GIT_URL="https://github.com/netdata/netdata.git"
+LOCAL_NETDATA_DIR="netdata"
+PR_TITLE="Use go.d.plugin in version ${TRAVIS_TAG}"
+PR_MSG="This is an autogenerated pull request and it should consist of 17 line changes. Please review before merging."
+export GIT_MAIL="bot@netdata.cloud"
+export GIT_USER="netdatabot"
+
+echo "--- Initialize git configuration ---"
+git config user.email "${GIT_MAIL}"
+git config user.name "${GIT_USER}"
+
+echo "--- Cloning ${NETDATA_GIT_URL} --- "
+git clone "${NETDATA_GIT_URL}" "${LOCAL_NETDATA_DIR}"
+cd "${LOCAL_NETDATA_DIR}"
+
+echo "--- Preparing changes for ${NETDATA_GIT_URL} ---"
+cp "$CHECKSUM_FILE" packaging/go.d.checksums
+echo "${TRAVIS_TAG}" > packaging/go.d.version
+git checkout -b new_go_d_version
+git add packaging/go.d.version packaging/go.d.checksums
+git commit -m "installer: include go.d.plugin version ${TRAVIS_TAG}"
+
+echo "--- Pushing changes to ${NETDATA_GIT_URL} --- "
+hub pull-request -p -r paulkatsoulakis,ilyam8 -m "${PR_TITLE}" -m "${PR_MSG}"
+
+echo "Netdata syncing completed successfully!"
diff --git a/.travis/publisher.sh b/.travis/publisher.sh
new file mode 100755
index 0000000..c4b5418
--- /dev/null
+++ b/.travis/publisher.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# This is the release script.
+# It installs hub and prepares a release
+#
+# Execution Requirements:
+# - GITHUB_TOKEN variable set with GitHub token
+#
+# Copyright: SPDX-License-Identifier: GPL-3.0-or-later
+#
+# Author: Pawel Krupa (@paulfantom)
+# Author: Pavlos Emm. Katsoulakis (paul@netdata.cloud)
+
+set -e
+
+if [ ! -f .gitignore ]; then
+ echo "Run as ./travis/$(basename "$0") from top level directory of git repository"
+ exit 1
+fi
+
+if [ -z ${TRAVIS_TAG+x} ]; then
+ exit 1
+fi
+
+HUB_VERSION=${HUB_VERSION:-"2.7.0"}
+
+echo "--- Download hub version: ${HUB_VERSION} ---"
+wget "https://github.com/github/hub/releases/download/v${HUB_VERSION}/hub-linux-amd64-${HUB_VERSION}.tgz" -O "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
+tar -C /tmp -xvf "/tmp/hub-linux-amd64-${HUB_VERSION}.tgz"
+export PATH=$PATH:"/tmp/hub-linux-amd64-${HUB_VERSION}/bin"
+
+set +e
+
+for i in bin/*; do
+ echo "--- Call hub to Release ${TRAVIS_TAG} for ${i} ---"
+ hub release edit -a "${i}" -m "${TRAVIS_TAG}" "${TRAVIS_TAG}"
+ sleep 2
+done
+
+echo "---- Submit PR to netdata/netdata to sync new version information ----"
+./.travis/netdata_sync.sh
diff --git a/.yamllint.yml b/.yamllint.yml
new file mode 100644
index 0000000..c459fc7
--- /dev/null
+++ b/.yamllint.yml
@@ -0,0 +1,10 @@
+extends: default
+
+yaml-files:
+ - 'config/*.conf'
+ - 'config/go.d/*.conf'
+
+rules:
+ document-start: disable
+ line-length:
+ max: 120
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..b629df5
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+# Netdata Community Code of Conduct
+
+This repository part of the Netdata Community, thus all contributors should follow our [Code of Conduct](https://github.com/netdata/netdata/blob/master/CODE_OF_CONDUCT.md).
diff --git a/Dockerfile.dev b/Dockerfile.dev
new file mode 100644
index 0000000..b165bb6
--- /dev/null
+++ b/Dockerfile.dev
@@ -0,0 +1,22 @@
+FROM golang:1.15 AS build-env
+
+RUN mkdir -p /workspace
+WORKDIR /workspace
+
+ENV GOOS=linux
+ENV GOARCH=amd64
+ENV CGO_ENABLED=0
+
+ADD go.mod go.sum ./
+
+RUN go mod download
+
+ADD . .
+
+RUN go build -o go.d.plugin github.com/netdata/go.d.plugin/cmd/godplugin
+
+FROM netdata/netdata
+
+ADD ./mocks/netdata/netdata.conf /etc/netdata/
+ADD ./mocks/conf.d /usr/lib/netdata/conf.d
+COPY --from=build-env /workspace/go.d.plugin /usr/libexec/netdata/plugins.d/go.d.plugin
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..94a9ed0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..43bda45
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,63 @@
+DEV_MODULES := all
+
+all: download vet test build
+
+.PHONY: help
+help:
+ @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: download
+download: ## Download go modules
+ go mod download
+
+.PHONY: build
+build: clean ## Build package
+ hack/go-build.sh
+
+.PHONY: clean
+clean:
+ rm -rf bin
+
+.PHONY: check
+check: fmt vet lint ## Run static code analysis
+
+
+.PHONY: test
+test: ## Run tests
+ go test ./... -race -cover -covermode=atomic
+
+.PHONY: fmt
+fmt:
+ hack/go-fmt.sh .
+
+.PHONY: vet
+vet:
+ go vet ./...
+
+.PHONY: release
+release: clean download ## Create all release artifacts
+ hack/go-build.sh all
+ tar -zcvf bin/config.tar.gz -C config .
+ cd bin && sha256sum -b * >"sha256sums.txt"
+
+.PHONY: dev
+dev: dev-build dev-up ## Launch development build
+
+dev-build:
+ docker-compose build
+
+dev-up:
+ docker-compose up -d --remove-orphans
+
+.PHONY: dev-exec
+dev-exec: ## Get into development environment
+ docker-compose exec netdata bash
+
+dev-log:
+ docker-compose logs -f netdata
+
+dev-run: ## Run go.d.plugin inside development environment
+ go run github.com/netdata/go.d.plugin/cmd/godplugin -d -c conf.d
+
+dev-mock: ## Run go.d.plugin inside development environment with mock config
+ go run github.com/netdata/go.d.plugin/cmd/godplugin -d -c ./mocks/conf.d -m $(DEV_MODULES)
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..37f8b9d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,141 @@
+# go.d.plugin
+
+[![CircleCI](https://circleci.com/gh/netdata/go.d.plugin.svg?style=svg)](https://circleci.com/gh/netdata/go.d.plugin)
+
+`go.d.plugin` is a `Netdata` external plugin. It is an **orchestrator** for data collection modules written in `go`.
+
+1. It runs as an independent process (`ps fax` shows it).
+2. It is started and stopped automatically by `Netdata`.
+3. It communicates with `Netdata` via a unidirectional pipe (sending data to the `Netdata` daemon).
+4. Supports any number of data collection [modules](https://github.com/netdata/go.d.plugin/tree/master/modules).
+5. Allows each [module](https://github.com/netdata/go.d.plugin/tree/master/modules) to have any number of data collection **jobs**.
+
+## Install
+
+Shipped with [`Netdata`](https://github.com/netdata/netdata).
+
+## Contributing
+
+If you have time and are willing to help, there are a lot of ways to contribute:
+
+- Fix and [report bugs](https://github.com/netdata/go.d.plugin/issues/new)
+- [Review code and feature proposals](https://github.com/netdata/go.d.plugin/pulls)
+- [Contribute modules](https://github.com/netdata/go.d.plugin/blob/master/CONTRIBUTING.md) (wip, module interface may be changed soon)
+
+## Available modules
+
+| Name | Monitors |
+| :------------------------------------------------------------------------------------------------ | :------------------------------ |
+| [activemq](https://github.com/netdata/go.d.plugin/tree/master/modules/activemq) | `ActiveMQ` |
+| [apache](https://github.com/netdata/go.d.plugin/tree/master/modules/apache) | `Apache` |
+| [bind](https://github.com/netdata/go.d.plugin/tree/master/modules/bind) | `ISC Bind` |
+| [cockroachdb](https://github.com/netdata/go.d.plugin/tree/master/modules/cockroachdb) | `CockroachDB` |
+| [consul](https://github.com/netdata/go.d.plugin/tree/master/modules/consul) | `Consul` |
+| [coredns](https://github.com/netdata/go.d.plugin/tree/master/modules/coredns) | `CoreDNS` |
+| [couchbase](https://github.com/netdata/go.d.plugin/tree/master/modules/couchbase) | `Couchbase` |
+| [couchdb](https://github.com/netdata/go.d.plugin/tree/master/modules/couchdb) | `CouchDB` |
+| [dnsdist](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsdist) | `Dnsdist` |
+| [dnsmasq](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq) | `Dnsmasq DNS Forwarder` |
+| [dnsmasq_dhcp](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq_dhcp) | `Dnsmasq DHCP` |
+| [dns_query](https://github.com/netdata/go.d.plugin/tree/master/modules/dnsquery) | `DNS Query RTT` |
+| [docker_engine](https://github.com/netdata/go.d.plugin/tree/master/modules/docker_engine) | `Docker Engine` |
+| [dockerhub](https://github.com/netdata/go.d.plugin/tree/master/modules/dockerhub) | `Docker Hub` |
+| [elasticsearch](https://github.com/netdata/go.d.plugin/tree/master/modules/elasticsearch) | `Elasticsearch` |
+| [energid](https://github.com/netdata/go.d.plugin/tree/master/modules/energid) | `Energi Core` |
+| [example](https://github.com/netdata/go.d.plugin/tree/master/modules/example) | - |
+| [filecheck](https://github.com/netdata/go.d.plugin/tree/master/modules/filecheck) | `Files and Directories` |
+| [fluentd](https://github.com/netdata/go.d.plugin/tree/master/modules/fluentd) | `Fluentd` |
+| [freeradius](https://github.com/netdata/go.d.plugin/tree/master/modules/freeradius) | `FreeRADIUS` |
+| [hdfs](https://github.com/netdata/go.d.plugin/tree/master/modules/hdfs) | `HDFS` |
+| [httpcheck](https://github.com/netdata/go.d.plugin/tree/master/modules/httpcheck) | `Any HTTP Endpoint` |
+| [isc_dhcpd](https://github.com/netdata/go.d.plugin/tree/master/modules/isc_dhcpd) | `ISC dhcpd` |
+| [k8s_kubelet](https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubelet) | `Kubelet` |
+| [k8s_kubeproxy](https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubeproxy) | `Kube-proxy` |
+| [lighttpd](https://github.com/netdata/go.d.plugin/tree/master/modules/lighttpd) | `Lighttpd` |
+| [lighttpd2](https://github.com/netdata/go.d.plugin/tree/master/modules/lighttpd2) | `Lighttpd2` |
+| [logstash](https://github.com/netdata/go.d.plugin/tree/master/modules/logstash) | `Logstash` |
+| [mysql](https://github.com/netdata/go.d.plugin/tree/master/modules/mysql) | `MySQL` |
+| [nginx](https://github.com/netdata/go.d.plugin/tree/master/modules/nginx) | `NGINX` |
+| [nginxvts](https://github.com/netdata/go.d.plugin/tree/master/modules/nginxvts) | `NGINX VTS` |
+| [openvpn](https://github.com/netdata/go.d.plugin/tree/master/modules/openvpn) | `OpenVPN` |
+| [phpdaemon](https://github.com/netdata/go.d.plugin/tree/master/modules/phpdaemon) | `phpDaemon` |
+| [phpfpm](https://github.com/netdata/go.d.plugin/tree/master/modules/phpfpm) | `PHP-FPM` |
+| [pihole](https://github.com/netdata/go.d.plugin/tree/master/modules/pihole) | `Pi-hole` |
+| [pika](https://github.com/netdata/go.d.plugin/tree/master/modules/pika) | `Pika` |
+| [prometheus](https://github.com/netdata/go.d.plugin/tree/master/modules/prometheus) | `Any Prometheus Endpoint` |
+| [portcheck](https://github.com/netdata/go.d.plugin/tree/master/modules/portcheck) | `Any TCP Endpoint` |
+| [powerdns](https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns) | `PowerDNS Authoritative Server` |
+| [powerdns_recursor](https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns_recursor) | `PowerDNS Recursor` |
+| [pulsar](https://github.com/netdata/go.d.plugin/tree/master/modules/pulsar)           | `Apache Pulsar`                 |
+| [rabbitmq](https://github.com/netdata/go.d.plugin/tree/master/modules/rabbitmq) | `RabbitMQ` |
+| [redis](https://github.com/netdata/go.d.plugin/tree/master/modules/redis) | `Redis` |
+| [scaleio](https://github.com/netdata/go.d.plugin/tree/master/modules/scaleio) | `Dell EMC ScaleIO` |
+| [solr](https://github.com/netdata/go.d.plugin/tree/master/modules/solr) | `Solr` |
+| [squidlog](https://github.com/netdata/go.d.plugin/tree/master/modules/squidlog) | `Squid` |
+| [springboot2](https://github.com/netdata/go.d.plugin/tree/master/modules/springboot2) | `Spring Boot2` |
+| [systemdunits](https://github.com/netdata/go.d.plugin/tree/master/modules/systemdunits) | `Systemd unit state` |
+| [tengine](https://github.com/netdata/go.d.plugin/tree/master/modules/tengine) | `Tengine` |
+| [unbound](https://github.com/netdata/go.d.plugin/tree/master/modules/unbound) | `Unbound` |
+| [vcsa](https://github.com/netdata/go.d.plugin/tree/master/modules/vcsa) | `vCenter Server Appliance` |
+| [vernemq](https://github.com/netdata/go.d.plugin/tree/master/modules/vernemq) | `VerneMQ` |
+| [vsphere](https://github.com/netdata/go.d.plugin/tree/master/modules/vsphere) | `VMware vCenter Server` |
+| [web_log](https://github.com/netdata/go.d.plugin/tree/master/modules/weblog) | `Apache/NGINX` |
+| [whoisquery](https://github.com/netdata/go.d.plugin/tree/master/modules/whoisquery) | `Domain Expiry` |
+| [wmi](https://github.com/netdata/go.d.plugin/tree/master/modules/wmi) | `Windows Machines` |
+| [x509check](https://github.com/netdata/go.d.plugin/tree/master/modules/x509check) | `Digital Certificates` |
+| [zookeeper](https://github.com/netdata/go.d.plugin/tree/master/modules/zookeeper) | `ZooKeeper` |
+
+## Configuration
+
+Edit the `go.d.conf` configuration file using `edit-config` from the Netdata [config
+directory](https://learn.netdata.cloud/docs/configure/nodes), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d.conf
+```
+
+Configurations are written in [YAML](http://yaml.org/).
+
+- [plugin configuration](https://github.com/netdata/go.d.plugin/blob/master/config/go.d.conf)
+- [specific module configuration](https://github.com/netdata/go.d.plugin/tree/master/config/go.d)
+
+## Developing
+
+- Add your module to the [modules dir](https://github.com/netdata/go.d.plugin/tree/master/modules).
+- Import the module in the [main.go](https://github.com/netdata/go.d.plugin/blob/master/cmd/godplugin/main.go).
+- To build it execute `make` from the plugin root dir or `hack/go-build.sh`.
+- Run it in the debug mode `bin/godplugin -d -m <MODULE_NAME>`.
+- Use `make clean` when you are done with testing.
+
+## Troubleshooting
+
+Plugin CLI:
+
+```sh
+Usage:
+ orchestrator [OPTIONS] [update every]
+
+Application Options:
+ -m, --modules= module name to run (default: all)
+ -c, --config-dir= config dir to read
+ -w, --watch-path= config path to watch
+ -d, --debug debug mode
+ -v, --version display the version and exit
+
+Help Options:
+ -h, --help Show this help message
+```
+
+To debug specific module:
+
+```sh
+# become user netdata
+sudo su -s /bin/bash netdata
+
+# run plugin in debug mode
+./go.d.plugin -d -m <module name>
+```
+
+Change `<module name>` to the module name you want to debug.
+See the [whole list](#available-modules) of available modules.
diff --git a/agent/README.md b/agent/README.md
new file mode 100644
index 0000000..a86fed2
--- /dev/null
+++ b/agent/README.md
@@ -0,0 +1,157 @@
+# agent
+
+This library is a tool for writing [netdata](https://github.com/netdata/netdata) plugins.
+
+We strongly believe that custom plugins are very important and they must be easy to write.
+
+
+Definitions:
+ - orchestrator
+ > plugin orchestrators are external plugins that do not collect any data by themselves. Instead they support data collection modules written in the language of the orchestrator. Usually the orchestrator provides a higher level abstraction, making it ideal for writing new data collection modules with the minimum of code.
+
+ - plugin
+ > plugin is a set of data collection modules.
+
+ - module
+ > module is a data collector. It collects, processes and returns processed data to the orchestrator.
+
+ - job
+ > job is a module instance with specific settings.
+
+
+Package provides:
+ - CLI parser
+ - plugin orchestrator (loads configurations, creates and serves jobs)
+
+You are responsible only for __creating modules__.
+
+## Custom plugin example
+
+[Yep! So easy!](https://github.com/netdata/go.d.plugin/blob/master/examples/simple/main.go)
+
+## How to write a Module
+
+Module is responsible for **creating charts** and **collecting data**. Implement the Module interface and that is it.
+
+```go
+type Module interface {
+ // Init does initialization.
+ // If it returns false, the job will be disabled.
+ Init() bool
+
+ // Check is called after Init.
+ // If it returns false, the job will be disabled.
+ Check() bool
+
+ // Charts returns the chart definition.
+ // Make sure not to share returned instance.
+ Charts() *Charts
+
+ // Collect collects metrics.
+ Collect() map[string]int64
+
+ // SetLogger sets logger.
+ SetLogger(l *logger.Logger)
+
+ // Cleanup performs cleanup if needed.
+ Cleanup()
+}
+
+// Base is a helper struct. All modules should embed this struct.
+type Base struct {
+ *logger.Logger
+}
+
+// SetLogger sets logger.
+func (b *Base) SetLogger(l *logger.Logger) { b.Logger = l }
+
+```
+
+## How to write a Plugin
+
+Since plugin is a set of modules all you need is:
+ - write module(s)
+ - add module(s) to the plugin's [registry](https://github.com/netdata/go.d.plugin/blob/master/plugin/module/registry.go)
+ - start the plugin
+
+
+## How to integrate your plugin into Netdata
+
+Three simple steps:
+ - move the plugin to the `plugins.d` dir.
+ - add plugin configuration file to the `etc/netdata/` dir.
+ - add modules configuration files to the `etc/netdata/<DIR_NAME>/` dir.
+
+Congratulations!
+
+## Configurations
+
+Configurations are written in [YAML](https://yaml.org/).
+
+ - plugin configuration:
+
+```yaml
+
+# Enable/disable the whole plugin.
+enabled: yes
+
+# Default enable/disable value for all modules.
+default_run: yes
+
+# Maximum number of used CPUs. Zero means no limit.
+max_procs: 0
+
+# Enable/disable specific plugin module
+modules:
+# module_name1: yes
+# module_name2: yes
+
+```
+
+ - module configuration
+
+```yaml
+# [ GLOBAL ]
+update_every: 1
+autodetection_retry: 0
+
+# [ JOBS ]
+jobs:
+ - name: job1
+ param1: value1
+ param2: value2
+
+ - name: job2
+ param1: value1
+ param2: value2
+```
+
+Plugin uses `yaml.Unmarshal` to add configuration parameters to the module. Please use `yaml` tags!
+
+## Debug
+
+Plugin CLI:
+```
+Usage:
+ plugin [OPTIONS] [update every]
+
+Application Options:
+ -d, --debug debug mode
+ -m, --modules= modules name (default: all)
+ -c, --config= config dir
+
+Help Options:
+ -h, --help Show this help message
+
+```
+
+Specific module debug:
+```
+# become user netdata
+sudo su -s /bin/bash netdata
+
+# run plugin in debug mode
+./<plugin_name> -d -m <module_name>
+```
+
+Change `<plugin_name>` to your plugin name and `<module_name>` to the module name you want to debug.
diff --git a/agent/agent.go b/agent/agent.go
new file mode 100644
index 0000000..52c27c0
--- /dev/null
+++ b/agent/agent.go
@@ -0,0 +1,206 @@
+package agent
+
+import (
+ "context"
+ "io"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/build"
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/job/discovery"
+ "github.com/netdata/go.d.plugin/agent/job/registry"
+ "github.com/netdata/go.d.plugin/agent/job/run"
+ "github.com/netdata/go.d.plugin/agent/job/state"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/agent/netdataapi"
+ "github.com/netdata/go.d.plugin/logger"
+ "github.com/netdata/go.d.plugin/pkg/multipath"
+
+ "github.com/mattn/go-isatty"
+)
+
+var isTerminal = isatty.IsTerminal(os.Stdout.Fd())
+
+// Config is an Agent configuration.
+type Config struct {
+ Name string
+ ConfDir []string
+ ModulesConfDir []string
+ ModulesSDConfPath []string
+ StateFile string
+ LockDir string
+ ModuleRegistry module.Registry
+ RunModule string
+ MinUpdateEvery int
+}
+
+// Agent represents orchestrator.
+type Agent struct {
+ Name string
+ ConfDir multipath.MultiPath
+ ModulesConfDir multipath.MultiPath
+ ModulesSDConfPath []string
+ StateFile string
+ LockDir string
+ RunModule string
+ MinUpdateEvery int
+ ModuleRegistry module.Registry
+ Out io.Writer
+ api *netdataapi.API
+ *logger.Logger
+}
+
+// New creates a new Agent.
+func New(cfg Config) *Agent {
+ p := &Agent{
+ Name: cfg.Name,
+ ConfDir: cfg.ConfDir,
+ ModulesConfDir: cfg.ModulesConfDir,
+ ModulesSDConfPath: cfg.ModulesSDConfPath,
+ StateFile: cfg.StateFile,
+ LockDir: cfg.LockDir,
+ RunModule: cfg.RunModule,
+ MinUpdateEvery: cfg.MinUpdateEvery,
+ ModuleRegistry: module.DefaultRegistry,
+ Out: os.Stdout,
+ }
+
+ logger.Prefix = p.Name
+ p.Logger = logger.New("main", "main")
+ p.api = netdataapi.New(p.Out)
+
+ return p
+}
+
+// Run starts the Agent: it launches signal handling and keep-alive goroutines, then serves instances until terminated.
+func (a *Agent) Run() {
+ go a.signalHandling()
+ go a.keepAlive()
+ serve(a)
+}
+
+func serve(p *Agent) {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGHUP)
+ var wg sync.WaitGroup
+
+ for {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ wg.Add(1)
+ go func() { defer wg.Done(); p.run(ctx) }()
+
+ sig := <-ch
+ p.Infof("received %s signal (%d), stopping running instance", sig, sig)
+ cancel()
+ wg.Wait()
+ time.Sleep(time.Second)
+ }
+}
+
+func (a *Agent) run(ctx context.Context) {
+ a.Info("instance is started")
+ defer func() { a.Info("instance is stopped") }()
+
+ cfg := a.loadPluginConfig()
+ a.Infof("using config: %s", cfg)
+ if !cfg.Enabled {
+ a.Info("plugin is disabled in the configuration file, exiting...")
+ if isTerminal {
+ os.Exit(0)
+ }
+ _ = a.api.DISABLE()
+ return
+ }
+
+ enabled := a.loadEnabledModules(cfg)
+ if len(enabled) == 0 {
+ a.Info("no modules to run")
+ if isTerminal {
+ os.Exit(0)
+ }
+ _ = a.api.DISABLE()
+ return
+ }
+
+ discCfg := a.buildDiscoveryConf(enabled)
+
+ discoverer, err := discovery.NewManager(discCfg)
+ if err != nil {
+ a.Error(err)
+ if isTerminal {
+ os.Exit(0)
+ }
+ return
+ }
+
+ runner := run.NewManager()
+
+ builder := build.NewManager()
+ builder.Runner = runner
+ builder.PluginName = a.Name
+ builder.Out = a.Out
+ builder.Modules = enabled
+
+ if a.LockDir != "" {
+ builder.Registry = registry.NewFileLockRegistry(a.LockDir)
+ }
+
+ var saver *state.Manager
+ if !isTerminal && a.StateFile != "" {
+ saver = state.NewManager(a.StateFile)
+ builder.CurState = saver
+ if store, err := state.Load(a.StateFile); err != nil {
+ a.Warningf("couldn't load state file: %v", err)
+ } else {
+ builder.PrevState = store
+ }
+ }
+
+ in := make(chan []*confgroup.Group)
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); runner.Run(ctx) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); builder.Run(ctx, in) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); discoverer.Run(ctx, in) }()
+
+ if saver != nil {
+ wg.Add(1)
+ go func() { defer wg.Done(); saver.Run(ctx) }()
+ }
+
+ wg.Wait()
+ <-ctx.Done()
+ runner.Cleanup()
+}
+
+func (a *Agent) signalHandling() {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGPIPE)
+
+ sig := <-ch
+ a.Infof("received %s signal (%d). Terminating...", sig, sig)
+ os.Exit(0)
+}
+
+func (a *Agent) keepAlive() {
+ if isTerminal {
+ return
+ }
+
+ tk := time.NewTicker(time.Second)
+ defer tk.Stop()
+
+ for range tk.C {
+ _ = a.api.EMPTYLINE()
+ }
+}
diff --git a/agent/agent_test.go b/agent/agent_test.go
new file mode 100644
index 0000000..f37fb1c
--- /dev/null
+++ b/agent/agent_test.go
@@ -0,0 +1,106 @@
+package agent
+
+import (
+ "bytes"
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/stretchr/testify/assert"
+)
+
+// TODO: tech debt
+func TestNew(t *testing.T) {
+
+}
+
+func TestAgent_Run(t *testing.T) {
+ a := New(Config{
+ Name: "",
+ ConfDir: nil,
+ ModulesConfDir: nil,
+ ModulesSDConfPath: nil,
+ StateFile: "",
+ ModuleRegistry: nil,
+ RunModule: "",
+ MinUpdateEvery: 0,
+ })
+
+ var buf bytes.Buffer
+ a.Out = &buf
+
+ var mux sync.Mutex
+ stats := make(map[string]int)
+ a.ModuleRegistry = prepareRegistry(&mux, stats, "module1", "module2")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); a.run(ctx) }()
+
+ time.Sleep(time.Second * 2)
+ cancel()
+ wg.Wait()
+
+ assert.Equalf(t, 1, stats["module1_init"], "module1 init")
+ assert.Equalf(t, 1, stats["module2_init"], "module2 init")
+ assert.Equalf(t, 1, stats["module1_check"], "module1 check")
+ assert.Equalf(t, 1, stats["module2_check"], "module2 check")
+ assert.Equalf(t, 1, stats["module1_charts"], "module1 charts")
+ assert.Equalf(t, 1, stats["module2_charts"], "module2 charts")
+ assert.Truef(t, stats["module1_collect"] > 0, "module1 collect")
+ assert.Truef(t, stats["module2_collect"] > 0, "module2 collect")
+ assert.Equalf(t, 1, stats["module1_cleanup"], "module1 cleanup")
+ assert.Equalf(t, 1, stats["module2_cleanup"], "module2 cleanup")
+ assert.True(t, buf.String() != "")
+}
+
+func prepareRegistry(mux *sync.Mutex, stats map[string]int, names ...string) module.Registry {
+ reg := module.Registry{}
+ for _, name := range names {
+ name := name
+ reg.Register(name, module.Creator{
+ Create: func() module.Module { return prepareMockModule(name, mux, stats) },
+ })
+ }
+ return reg
+}
+
+func prepareMockModule(name string, mux *sync.Mutex, stats map[string]int) module.Module {
+ return &module.MockModule{
+ InitFunc: func() bool {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_init"]++
+ return true
+ },
+ CheckFunc: func() bool {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_check"]++
+ return true
+ },
+ ChartsFunc: func() *module.Charts {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_charts"]++
+ return &module.Charts{
+ &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}},
+ }
+ },
+ CollectFunc: func() map[string]int64 {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_collect"]++
+ return map[string]int64{"id1": 1}
+ },
+ CleanupFunc: func() {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_cleanup"]++
+ },
+ }
+}
diff --git a/agent/job/build/build.go b/agent/job/build/build.go
new file mode 100644
index 0000000..f99c3e9
--- /dev/null
+++ b/agent/job/build/build.go
@@ -0,0 +1,355 @@
+package build
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ jobpkg "github.com/netdata/go.d.plugin/agent/job"
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/logger"
+
+ "gopkg.in/yaml.v2"
+)
+
+type Runner interface {
+ Start(job jobpkg.Job)
+ Stop(fullName string)
+}
+
+type StateSaver interface {
+ Save(cfg confgroup.Config, state string)
+ Remove(cfg confgroup.Config)
+}
+
+type State interface {
+ Contains(cfg confgroup.Config, states ...string) bool
+}
+
+type Registry interface {
+ Register(name string) (bool, error)
+ Unregister(name string) error
+}
+
+type (
+ dummySaver struct{}
+ dummyState struct{}
+ dummyRegistry struct{}
+)
+
+func (d dummySaver) Save(_ confgroup.Config, _ string) {}
+func (d dummySaver) Remove(_ confgroup.Config) {}
+
+func (d dummyState) Contains(_ confgroup.Config, _ ...string) bool { return false }
+
+func (d dummyRegistry) Register(_ string) (bool, error) { return true, nil }
+func (d dummyRegistry) Unregister(_ string) error { return nil }
+
+type state = string
+
+const (
+ success state = "success" // successfully started
+ retry state = "retry" // failed, but we need keep trying auto-detection
+ failed state = "failed" // failed
+ duplicateLocal state = "duplicate_local" // a job with the same FullName is started
+ duplicateGlobal state = "duplicate_global" // a job with the same FullName is registered by another plugin
+ registrationError state = "registration_error" // an error during registration (only 'too many open files')
+ buildError state = "build_error" // an error during building
+)
+
+type (
+ Manager struct {
+ PluginName string
+ Out io.Writer
+ Modules module.Registry
+ *logger.Logger
+
+ Runner Runner
+ CurState StateSaver
+ PrevState State
+ Registry Registry
+
+ grpCache *groupCache
+ startCache *startedCache
+ retryCache *retryCache
+
+ addCh chan []confgroup.Config
+ removeCh chan []confgroup.Config
+ retryCh chan confgroup.Config
+ }
+)
+
+func NewManager() *Manager {
+ mgr := &Manager{
+ CurState: dummySaver{},
+ PrevState: dummyState{},
+ Registry: dummyRegistry{},
+ Out: ioutil.Discard,
+ Logger: logger.New("build", "manager"),
+ grpCache: newGroupCache(),
+ startCache: newStartedCache(),
+ retryCache: newRetryCache(),
+ addCh: make(chan []confgroup.Config),
+ removeCh: make(chan []confgroup.Config),
+ retryCh: make(chan confgroup.Config),
+ }
+ return mgr
+}
+
+func (m *Manager) Run(ctx context.Context, in chan []*confgroup.Group) {
+ m.Info("instance is started")
+ defer func() { m.cleanup(); m.Info("instance is stopped") }()
+
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); m.runGroupProcessing(ctx, in) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); m.runConfigProcessing(ctx) }()
+
+ wg.Wait()
+ <-ctx.Done()
+}
+
+func (m *Manager) cleanup() {
+ for _, task := range *m.retryCache {
+ task.cancel()
+ }
+ for name := range *m.startCache {
+ _ = m.Registry.Unregister(name)
+ }
+}
+
+func (m *Manager) runGroupProcessing(ctx context.Context, in <-chan []*confgroup.Group) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case groups := <-in:
+ for _, group := range groups {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ m.processGroup(ctx, group)
+ }
+ }
+ }
+ }
+}
+
+func (m *Manager) processGroup(ctx context.Context, group *confgroup.Group) {
+ if group == nil {
+ return
+ }
+ added, removed := m.grpCache.put(group)
+ m.Debugf("received config group ('%s'): %d jobs (added: %d, removed: %d)",
+ group.Source, len(group.Configs), len(added), len(removed))
+
+ select {
+ case <-ctx.Done():
+ return
+ case m.removeCh <- removed:
+ }
+
+ select {
+ case <-ctx.Done():
+ return
+ case m.addCh <- added:
+ }
+}
+
+func (m *Manager) runConfigProcessing(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case cfgs := <-m.addCh:
+ m.handleAdd(ctx, cfgs)
+ case cfgs := <-m.removeCh:
+ m.handleRemove(ctx, cfgs)
+ case cfg := <-m.retryCh:
+ m.handleAddCfg(ctx, cfg)
+ }
+ }
+}
+
+func (m *Manager) handleAdd(ctx context.Context, cfgs []confgroup.Config) {
+ for _, cfg := range cfgs {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ m.handleAddCfg(ctx, cfg)
+ }
+ }
+}
+
+func (m *Manager) handleRemove(ctx context.Context, cfgs []confgroup.Config) {
+ for _, cfg := range cfgs {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ m.handleRemoveCfg(cfg)
+ }
+ }
+}
+
+func (m *Manager) handleAddCfg(ctx context.Context, cfg confgroup.Config) {
+ if m.startCache.has(cfg) {
+ m.Infof("%s[%s] job is being served by another job, skipping it", cfg.Module(), cfg.Name())
+ m.CurState.Save(cfg, duplicateLocal)
+ return
+ }
+
+ task, isRetry := m.retryCache.lookup(cfg)
+ if isRetry {
+ task.cancel()
+ m.retryCache.remove(cfg)
+ }
+
+ job, err := m.buildJob(cfg)
+ if err != nil {
+ m.Warningf("couldn't build %s[%s]: %v", cfg.Module(), cfg.Name(), err)
+ m.CurState.Save(cfg, buildError)
+ return
+ }
+
+ if isRetry {
+ job.AutoDetectEvery = task.timeout
+ job.AutoDetectTries = task.retries
+ } else if job.AutoDetectionEvery() == 0 {
+ switch {
+ case m.PrevState.Contains(cfg, success, retry):
+ m.Infof("%s[%s] job last state is active/retry, applying recovering settings", cfg.Module(), cfg.Name())
+ job.AutoDetectEvery = 30
+ job.AutoDetectTries = 11
+ case isInsideK8sCluster() && cfg.Provider() == "file watcher":
+ m.Infof("%s[%s] is k8s job, applying recovering settings", cfg.Module(), cfg.Name())
+ job.AutoDetectEvery = 10
+ job.AutoDetectTries = 7
+ }
+ }
+
+ switch detection(job) {
+ case success:
+ if ok, err := m.Registry.Register(cfg.FullName()); ok || err != nil && !isTooManyOpenFiles(err) {
+ m.CurState.Save(cfg, success)
+ m.Runner.Start(job)
+ m.startCache.put(cfg)
+ } else if isTooManyOpenFiles(err) {
+ m.Error(err)
+ m.CurState.Save(cfg, registrationError)
+ } else {
+ m.Infof("%s[%s] job is being served by another plugin, skipping it", cfg.Module(), cfg.Name())
+ m.CurState.Save(cfg, duplicateGlobal)
+ }
+ case retry:
+ m.Infof("%s[%s] job detection failed, will retry in %d seconds",
+ cfg.Module(), cfg.Name(), job.AutoDetectionEvery())
+ m.CurState.Save(cfg, retry)
+ ctx, cancel := context.WithCancel(ctx)
+ m.retryCache.put(cfg, retryTask{
+ cancel: cancel,
+ timeout: job.AutoDetectionEvery(),
+ retries: job.AutoDetectTries,
+ })
+ timeout := time.Second * time.Duration(job.AutoDetectionEvery())
+ go runRetryTask(ctx, m.retryCh, cfg, timeout)
+ case failed:
+ m.CurState.Save(cfg, failed)
+ default:
+ m.Warningf("%s[%s] job detection: unknown state", cfg.Module(), cfg.Name())
+ }
+}
+
+func (m *Manager) handleRemoveCfg(cfg confgroup.Config) {
+ defer m.CurState.Remove(cfg)
+
+ if m.startCache.has(cfg) {
+ m.Runner.Stop(cfg.FullName())
+ _ = m.Registry.Unregister(cfg.FullName())
+ m.startCache.remove(cfg)
+ }
+
+ if task, ok := m.retryCache.lookup(cfg); ok {
+ task.cancel()
+ m.retryCache.remove(cfg)
+ }
+}
+
+func (m *Manager) buildJob(cfg confgroup.Config) (*module.Job, error) {
+ creator, ok := m.Modules[cfg.Module()]
+ if !ok {
+ return nil, fmt.Errorf("can not find %s module", cfg.Module())
+ }
+
+ m.Debugf("building %s[%s] job, config: %v", cfg.Module(), cfg.Name(), cfg)
+ mod := creator.Create()
+ if err := unmarshal(cfg, mod); err != nil {
+ return nil, err
+ }
+
+ job := module.NewJob(module.JobConfig{
+ PluginName: m.PluginName,
+ Name: cfg.Name(),
+ ModuleName: cfg.Module(),
+ FullName: cfg.FullName(),
+ UpdateEvery: cfg.UpdateEvery(),
+ AutoDetectEvery: cfg.AutoDetectionRetry(),
+ Priority: cfg.Priority(),
+ Module: mod,
+ Out: m.Out,
+ })
+ return job, nil
+}
+
+func detection(job jobpkg.Job) state {
+ if !job.AutoDetection() {
+ if job.RetryAutoDetection() {
+ return retry
+ } else {
+ return failed
+ }
+ }
+ return success
+}
+
+func runRetryTask(ctx context.Context, in chan<- confgroup.Config, cfg confgroup.Config, timeout time.Duration) {
+ t := time.NewTimer(timeout)
+ defer t.Stop()
+
+ select {
+ case <-ctx.Done():
+ case <-t.C:
+ select {
+ case <-ctx.Done():
+ case in <- cfg:
+ }
+ }
+}
+
+func unmarshal(conf interface{}, module interface{}) error {
+ bs, err := yaml.Marshal(conf)
+ if err != nil {
+ return err
+ }
+ return yaml.Unmarshal(bs, module)
+}
+
+func isInsideK8sCluster() bool {
+ host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
+ return host != "" && port != ""
+}
+
+func isTooManyOpenFiles(err error) bool {
+ return err != nil && strings.Contains(err.Error(), "too many open files")
+}
diff --git a/agent/job/build/build_test.go b/agent/job/build/build_test.go
new file mode 100644
index 0000000..78ca514
--- /dev/null
+++ b/agent/job/build/build_test.go
@@ -0,0 +1,107 @@
+package build
+
+import (
+ "bytes"
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/job/run"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/stretchr/testify/assert"
+)
+
+// TODO: tech debt
+func TestNewManager(t *testing.T) {
+
+}
+
+// TODO: tech debt
+func TestManager_Run(t *testing.T) {
+ groups := []*confgroup.Group{
+ {
+ Source: "source",
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "success",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ {
+ "name": "name",
+ "module": "success",
+ "update_every": module.UpdateEvery + 1,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ {
+ "name": "name",
+ "module": "fail",
+ "update_every": module.UpdateEvery + 1,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ },
+ }
+ var buf bytes.Buffer
+ builder := NewManager()
+ builder.Modules = prepareMockRegistry()
+ builder.Out = &buf
+ builder.PluginName = "test.plugin"
+ runner := run.NewManager()
+ builder.Runner = runner
+
+ ctx, cancel := context.WithCancel(context.Background())
+ in := make(chan []*confgroup.Group)
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); runner.Run(ctx) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); builder.Run(ctx, in) }()
+
+ select {
+ case in <- groups:
+ case <-time.After(time.Second * 2):
+ }
+
+ time.Sleep(time.Second * 5)
+ cancel()
+ wg.Wait()
+ runner.Cleanup()
+ assert.True(t, buf.String() != "")
+}
+
+func prepareMockRegistry() module.Registry {
+ reg := module.Registry{}
+ reg.Register("success", module.Creator{
+ Create: func() module.Module {
+ return &module.MockModule{
+ InitFunc: func() bool { return true },
+ CheckFunc: func() bool { return true },
+ ChartsFunc: func() *module.Charts {
+ return &module.Charts{
+ &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}},
+ }
+ },
+ CollectFunc: func() map[string]int64 {
+ return map[string]int64{"id1": 1}
+ },
+ }
+ },
+ })
+ reg.Register("fail", module.Creator{
+ Create: func() module.Module {
+ return &module.MockModule{
+ InitFunc: func() bool { return false },
+ }
+ },
+ })
+ return reg
+}
diff --git a/agent/job/build/cache.go b/agent/job/build/cache.go
new file mode 100644
index 0000000..7851958
--- /dev/null
+++ b/agent/job/build/cache.go
@@ -0,0 +1,137 @@
+package build
+
+import (
+ "context"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+)
+
type (
	// Type aliases documenting what the cache maps are keyed by.
	fullName  = string
	grpSource = string
	cfgHash   = uint64
	cfgCount  = uint

	// startedCache tracks jobs that were started, keyed by full job name.
	startedCache map[fullName]struct{}
	// retryCache tracks pending autodetection retry tasks, keyed by config hash.
	retryCache map[cfgHash]retryTask
	// groupCache tracks configs per source plus a global reference count
	// per config hash (the same config may arrive from several sources).
	groupCache struct {
		global map[cfgHash]cfgCount
		source map[grpSource]map[cfgHash]confgroup.Config
	}
	// retryTask is a scheduled autodetection retry with its cancel handle.
	retryTask struct {
		cancel  context.CancelFunc
		timeout int
		retries int
	}
)
+
+func newStartedCache() *startedCache {
+ return &startedCache{}
+}
+
+func newRetryCache() *retryCache {
+ return &retryCache{}
+}
+
+func newGroupCache() *groupCache {
+ return &groupCache{
+ global: make(map[cfgHash]cfgCount),
+ source: make(map[grpSource]map[cfgHash]confgroup.Config),
+ }
+}
+
+func (c startedCache) put(cfg confgroup.Config) {
+ c[cfg.FullName()] = struct{}{}
+}
+func (c startedCache) remove(cfg confgroup.Config) {
+ delete(c, cfg.FullName())
+}
+func (c startedCache) has(cfg confgroup.Config) bool {
+ _, ok := c[cfg.FullName()]
+ return ok
+}
+
+func (c retryCache) put(cfg confgroup.Config, retry retryTask) {
+ c[cfg.Hash()] = retry
+}
+func (c retryCache) remove(cfg confgroup.Config) {
+ delete(c, cfg.Hash())
+}
+func (c retryCache) lookup(cfg confgroup.Config) (retryTask, bool) {
+ v, ok := c[cfg.Hash()]
+ return v, ok
+}
+
+func (c *groupCache) put(group *confgroup.Group) (added, removed []confgroup.Config) {
+ if group == nil {
+ return
+ }
+ if len(group.Configs) == 0 {
+ return c.putEmpty(group)
+ }
+ return c.putNotEmpty(group)
+}
+
+func (c *groupCache) putEmpty(group *confgroup.Group) (added, removed []confgroup.Config) {
+ set, ok := c.source[group.Source]
+ if !ok {
+ return nil, nil
+ }
+
+ for hash, cfg := range set {
+ c.global[hash]--
+ if c.global[hash] == 0 {
+ removed = append(removed, cfg)
+ }
+ delete(set, hash)
+ }
+ delete(c.source, group.Source)
+ return nil, removed
+}
+
+func (c *groupCache) putNotEmpty(group *confgroup.Group) (added, removed []confgroup.Config) {
+ set, ok := c.source[group.Source]
+ if !ok {
+ set = make(map[cfgHash]confgroup.Config)
+ c.source[group.Source] = set
+ }
+
+ seen := make(map[uint64]struct{})
+
+ for _, cfg := range group.Configs {
+ hash := cfg.Hash()
+ seen[hash] = struct{}{}
+
+ if _, ok := set[hash]; ok {
+ continue
+ }
+
+ set[hash] = cfg
+ if c.global[hash] == 0 {
+ added = append(added, cfg)
+ }
+ c.global[hash]++
+ }
+
+ if !ok {
+ return added, nil
+ }
+
+ for hash, cfg := range set {
+ if _, ok := seen[hash]; ok {
+ continue
+ }
+
+ delete(set, hash)
+ c.global[hash]--
+ if c.global[hash] == 0 {
+ removed = append(removed, cfg)
+ }
+ }
+
+ if ok && len(set) == 0 {
+ delete(c.source, group.Source)
+ }
+
+ return added, removed
+}
diff --git a/agent/job/build/cache_test.go b/agent/job/build/cache_test.go
new file mode 100644
index 0000000..fcaf0d5
--- /dev/null
+++ b/agent/job/build/cache_test.go
@@ -0,0 +1,134 @@
+package build
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestJobCache_put(t *testing.T) {
+ tests := map[string]struct {
+ prepareGroups []confgroup.Group
+ groups []confgroup.Group
+ expectedAdd []confgroup.Config
+ expectedRemove []confgroup.Config
+ }{
+ "new group, new configs": {
+ groups: []confgroup.Group{
+ prepareGroup("source", prepareCfg("name", "module")),
+ },
+ expectedAdd: []confgroup.Config{
+ prepareCfg("name", "module"),
+ },
+ },
+ "several equal updates for the same group": {
+ groups: []confgroup.Group{
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ },
+ expectedAdd: []confgroup.Config{
+ prepareCfg("name", "module"),
+ },
+ },
+ "empty group update for cached group": {
+ prepareGroups: []confgroup.Group{
+ prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ groups: []confgroup.Group{
+ prepareGroup("source"),
+ },
+ expectedRemove: []confgroup.Config{
+ prepareCfg("name1", "module"),
+ prepareCfg("name2", "module"),
+ },
+ },
+ "changed group update for cached group": {
+ prepareGroups: []confgroup.Group{
+ prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ groups: []confgroup.Group{
+ prepareGroup("source", prepareCfg("name2", "module")),
+ },
+ expectedRemove: []confgroup.Config{
+ prepareCfg("name1", "module"),
+ },
+ },
+ "empty group update for uncached group": {
+ groups: []confgroup.Group{
+ prepareGroup("source"),
+ prepareGroup("source"),
+ },
+ },
+ "several updates with different source but same context": {
+ groups: []confgroup.Group{
+ prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ expectedAdd: []confgroup.Config{
+ prepareCfg("name1", "module"),
+ prepareCfg("name2", "module"),
+ },
+ },
+ "have equal configs from 2 sources, get empty group for the 1st source": {
+ prepareGroups: []confgroup.Group{
+ prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ groups: []confgroup.Group{
+ prepareGroup("source2"),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ cache := newGroupCache()
+
+ for _, group := range test.prepareGroups {
+ cache.put(&group)
+ }
+
+ var added, removed []confgroup.Config
+ for _, group := range test.groups {
+ a, r := cache.put(&group)
+ added = append(added, a...)
+ removed = append(removed, r...)
+ }
+
+ sortConfigs(added)
+ sortConfigs(removed)
+ sortConfigs(test.expectedAdd)
+ sortConfigs(test.expectedRemove)
+
+ assert.Equalf(t, test.expectedAdd, added, "added configs")
+ assert.Equalf(t, test.expectedRemove, removed, "removed configs, step '%s' %d")
+ })
+ }
+}
+
+func prepareGroup(source string, cfgs ...confgroup.Config) confgroup.Group {
+ return confgroup.Group{
+ Configs: cfgs,
+ Source: source,
+ }
+}
+
+func prepareCfg(name, module string) confgroup.Config {
+ return confgroup.Config{
+ "name": name,
+ "module": module,
+ }
+}
+
+func sortConfigs(cfgs []confgroup.Config) {
+ if len(cfgs) == 0 {
+ return
+ }
+ sort.Slice(cfgs, func(i, j int) bool { return cfgs[i].FullName() < cfgs[j].FullName() })
+}
diff --git a/agent/job/confgroup/group.go b/agent/job/confgroup/group.go
new file mode 100644
index 0000000..2f3bf0d
--- /dev/null
+++ b/agent/job/confgroup/group.go
@@ -0,0 +1,86 @@
+package confgroup
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/ilyam8/hashstructure"
+)
+
// Group is a set of job configs that share a common source (e.g. one file).
type Group struct {
	Configs []Config
	Source  string
}

// Config is a single job configuration stored as raw key/value pairs.
type Config map[string]interface{}

// HashIncludeMap tells hashstructure to skip internal service keys —
// those wrapped in double underscores, e.g. "__source__" — when hashing,
// so provenance markers do not affect config identity.
func (c Config) HashIncludeMap(_ string, k, _ interface{}) (bool, error) {
	s := k.(string)
	return !(strings.HasPrefix(s, "__") && strings.HasSuffix(s, "__")), nil
}
+
+func (c Config) Name() string { v, _ := c.get("name").(string); return v }
+func (c Config) Module() string { v, _ := c.get("module").(string); return v }
+func (c Config) FullName() string { return fullName(c.Name(), c.Module()) }
+func (c Config) UpdateEvery() int { v, _ := c.get("update_every").(int); return v }
+func (c Config) AutoDetectionRetry() int { v, _ := c.get("autodetection_retry").(int); return v }
+func (c Config) Priority() int { v, _ := c.get("priority").(int); return v }
+func (c Config) Hash() uint64 { return calcHash(c) }
+func (c Config) Source() string { v, _ := c.get("__source__").(string); return v }
+func (c Config) Provider() string { v, _ := c.get("__provider__").(string); return v }
+func (c Config) SetModule(source string) { c.set("module", source) }
+func (c Config) SetSource(source string) { c.set("__source__", source) }
+func (c Config) SetProvider(source string) { c.set("__provider__", source) }
+
+func (c Config) set(key string, value interface{}) { c[key] = value }
+func (c Config) get(key string) interface{} { return c[key] }
+
// Apply fills in missing or non-positive job settings from def, falling back
// to the module package globals, and normalizes the job name.
func (c Config) Apply(def Default) {
	if c.UpdateEvery() <= 0 {
		v := firstPositive(def.UpdateEvery, module.UpdateEvery)
		c.set("update_every", v)
	}
	if c.AutoDetectionRetry() <= 0 {
		v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry)
		c.set("autodetection_retry", v)
	}
	if c.Priority() <= 0 {
		v := firstPositive(def.Priority, module.Priority)
		c.set("priority", v)
	}
	// The minimum is enforced after defaulting, so it clamps both explicit
	// and defaulted update_every values.
	if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 {
		c.set("update_every", def.MinUpdateEvery)
	}
	// A missing name falls back to the module name; an explicit name has its
	// whitespace normalized.
	if c.Name() == "" {
		c.set("name", c.Module())
	} else {
		c.set("name", cleanName(c.Name()))
	}
}
+
// reSpace matches any run of whitespace characters.
var reSpace = regexp.MustCompile(`\s+`)

// cleanName replaces every whitespace run in name with a single underscore.
func cleanName(name string) string {
	// The replacement is a fixed string, so the literal variant is exact.
	return reSpace.ReplaceAllLiteralString(name, "_")
}
+
// fullName builds the job's unique name as "<module>_<name>", collapsed to
// just the name when it equals the module name.
func fullName(name, module string) string {
	if name != module {
		return module + "_" + name
	}
	return name
}
+
// calcHash hashes obj with hashstructure (Config's HashIncludeMap excludes
// internal keys). The error is deliberately discarded — presumably hashing
// plain YAML-decoded values cannot fail here; on failure the zero hash is
// returned. TODO confirm this is acceptable for collision behavior.
func calcHash(obj interface{}) uint64 {
	hash, _ := hashstructure.Hash(obj, nil)
	return hash
}
+
// firstPositive returns the first positive value among value and others;
// when none is positive it returns the last value examined.
func firstPositive(value int, others ...int) int {
	current := value
	for _, v := range others {
		if current > 0 {
			return current
		}
		current = v
	}
	return current
}
diff --git a/agent/job/confgroup/group_test.go b/agent/job/confgroup/group_test.go
new file mode 100644
index 0000000..078077a
--- /dev/null
+++ b/agent/job/confgroup/group_test.go
@@ -0,0 +1,322 @@
+package confgroup
+
+import (
+ "testing"
+
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/stretchr/testify/assert"
+)
+
// TestConfig_Name verifies Name returns the "name" value when it is a string
// and "" otherwise (wrong type, unset key, nil map).
func TestConfig_Name(t *testing.T) {
	tests := map[string]struct {
		cfg      Config
		expected interface{}
	}{
		"string":       {cfg: Config{"name": "name"}, expected: "name"},
		"empty string": {cfg: Config{"name": ""}, expected: ""},
		"not string":   {cfg: Config{"name": 0}, expected: ""},
		"not set":      {cfg: Config{}, expected: ""},
		"nil cfg":      {expected: ""},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.expected, test.cfg.Name())
		})
	}
}

// TestConfig_Module verifies Module returns the "module" value when it is a
// string and "" otherwise.
func TestConfig_Module(t *testing.T) {
	tests := map[string]struct {
		cfg      Config
		expected interface{}
	}{
		"string":       {cfg: Config{"module": "module"}, expected: "module"},
		"empty string": {cfg: Config{"module": ""}, expected: ""},
		"not string":   {cfg: Config{"module": 0}, expected: ""},
		"not set":      {cfg: Config{}, expected: ""},
		"nil cfg":      {expected: ""},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.expected, test.cfg.Module())
		})
	}
}

// TestConfig_FullName verifies the "<module>_<name>" composition and its
// collapse to just the name when both are equal.
func TestConfig_FullName(t *testing.T) {
	tests := map[string]struct {
		cfg      Config
		expected interface{}
	}{
		"name == module": {cfg: Config{"name": "name", "module": "name"}, expected: "name"},
		"name != module": {cfg: Config{"name": "name", "module": "module"}, expected: "module_name"},
		"nil cfg":        {expected: ""},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.expected, test.cfg.FullName())
		})
	}
}

// TestConfig_UpdateEvery verifies UpdateEvery returns the int value or 0 for
// wrong type / unset / nil map.
func TestConfig_UpdateEvery(t *testing.T) {
	tests := map[string]struct {
		cfg      Config
		expected interface{}
	}{
		"int":     {cfg: Config{"update_every": 1}, expected: 1},
		"not int": {cfg: Config{"update_every": "1"}, expected: 0},
		"not set": {cfg: Config{}, expected: 0},
		"nil cfg": {expected: 0},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.expected, test.cfg.UpdateEvery())
		})
	}
}

// TestConfig_AutoDetectionRetry verifies AutoDetectionRetry returns the int
// value or 0 for wrong type / unset / nil map.
func TestConfig_AutoDetectionRetry(t *testing.T) {
	tests := map[string]struct {
		cfg      Config
		expected interface{}
	}{
		"int":     {cfg: Config{"autodetection_retry": 1}, expected: 1},
		"not int": {cfg: Config{"autodetection_retry": "1"}, expected: 0},
		"not set": {cfg: Config{}, expected: 0},
		"nil cfg": {expected: 0},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.expected, test.cfg.AutoDetectionRetry())
		})
	}
}

// TestConfig_Priority verifies Priority returns the int value or 0 for wrong
// type / unset / nil map.
func TestConfig_Priority(t *testing.T) {
	tests := map[string]struct {
		cfg      Config
		expected interface{}
	}{
		"int":     {cfg: Config{"priority": 1}, expected: 1},
		"not int": {cfg: Config{"priority": "1"}, expected: 0},
		"not set": {cfg: Config{}, expected: 0},
		"nil cfg": {expected: 0},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.expected, test.cfg.Priority())
		})
	}
}
+
// TestConfig_Hash verifies that internal "__*__" keys do not affect the hash
// (see HashIncludeMap) while regular keys do, and that a typical config
// hashes to a non-zero value.
func TestConfig_Hash(t *testing.T) {
	tests := map[string]struct {
		one, two Config
		equal    bool
	}{
		"same keys, no internal keys": {
			one:   Config{"name": "name"},
			two:   Config{"name": "name"},
			equal: true,
		},
		"same keys, different internal keys": {
			one:   Config{"name": "name", "__key__": 1},
			two:   Config{"name": "name", "__value__": 1},
			equal: true,
		},
		"same keys, same internal keys": {
			one:   Config{"name": "name", "__key__": 1},
			two:   Config{"name": "name", "__key__": 1},
			equal: true,
		},
		"diff keys, no internal keys": {
			one:   Config{"name": "name1"},
			two:   Config{"name": "name2"},
			equal: false,
		},
		"diff keys, different internal keys": {
			one:   Config{"name": "name1", "__key__": 1},
			two:   Config{"name": "name2", "__value__": 1},
			equal: false,
		},
		"diff keys, same internal keys": {
			one:   Config{"name": "name1", "__key__": 1},
			two:   Config{"name": "name2", "__key__": 1},
			equal: false,
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			if test.equal {
				assert.Equal(t, test.one.Hash(), test.two.Hash())
			} else {
				assert.NotEqual(t, test.one.Hash(), test.two.Hash())
			}
		})
	}
	// Sanity check: a realistic config must not hash to zero.
	cfg := Config{"name": "name", "module": "module"}
	assert.NotZero(t, cfg.Hash())
}
+
+func TestConfig_SetModule(t *testing.T) {
+ cfg := Config{}
+ cfg.SetModule("name")
+
+ assert.Equal(t, cfg.Module(), "name")
+}
+
+func TestConfig_SetSource(t *testing.T) {
+ cfg := Config{}
+ cfg.SetSource("name")
+
+ assert.Equal(t, cfg.Source(), "name")
+}
+
+func TestConfig_SetProvider(t *testing.T) {
+ cfg := Config{}
+ cfg.SetProvider("name")
+
+ assert.Equal(t, cfg.Provider(), "name")
+}
+
// TestConfig_Apply verifies the defaulting precedence (job value > Default
// value > module package global), the MinUpdateEvery clamp, and the job name
// normalization (fallback to module name, whitespace cleanup).
func TestConfig_Apply(t *testing.T) {
	const jobDef = 11
	const applyDef = 22
	tests := map[string]struct {
		def         Default
		origCfg     Config
		expectedCfg Config
	}{
		// Job-level values win over applied defaults.
		"+job +def": {
			def: Default{
				UpdateEvery:        applyDef,
				AutoDetectionRetry: applyDef,
				Priority:           applyDef,
			},
			origCfg: Config{
				"name":                "name",
				"module":              "module",
				"update_every":        jobDef,
				"autodetection_retry": jobDef,
				"priority":            jobDef,
			},
			expectedCfg: Config{
				"name":                "name",
				"module":              "module",
				"update_every":        jobDef,
				"autodetection_retry": jobDef,
				"priority":            jobDef,
			},
		},
		// Missing job values are filled from the applied defaults.
		"-job +def": {
			def: Default{
				UpdateEvery:        applyDef,
				AutoDetectionRetry: applyDef,
				Priority:           applyDef,
			},
			origCfg: Config{
				"name":   "name",
				"module": "module",
			},
			expectedCfg: Config{
				"name":                "name",
				"module":              "module",
				"update_every":        applyDef,
				"autodetection_retry": applyDef,
				"priority":            applyDef,
			},
		},
		// With neither job nor applied values, the module package globals apply.
		"-job -def (+global)": {
			def: Default{},
			origCfg: Config{
				"name":   "name",
				"module": "module",
			},
			expectedCfg: Config{
				"name":                "name",
				"module":              "module",
				"update_every":        module.UpdateEvery,
				"autodetection_retry": module.AutoDetectionRetry,
				"priority":            module.Priority,
			},
		},
		"adjust update_every (update_every < min update every)": {
			def: Default{
				MinUpdateEvery: jobDef + 10,
			},
			origCfg: Config{
				"name":         "name",
				"module":       "module",
				"update_every": jobDef,
			},
			expectedCfg: Config{
				"name":                "name",
				"module":              "module",
				"update_every":        jobDef + 10,
				"autodetection_retry": module.AutoDetectionRetry,
				"priority":            module.Priority,
			},
		},
		"do not adjust update_every (update_every > min update every)": {
			def: Default{
				MinUpdateEvery: 2,
			},
			origCfg: Config{
				"name":         "name",
				"module":       "module",
				"update_every": jobDef,
			},
			expectedCfg: Config{
				"name":                "name",
				"module":              "module",
				"update_every":        jobDef,
				"autodetection_retry": module.AutoDetectionRetry,
				"priority":            module.Priority,
			},
		},
		"set name to module name if name not set": {
			def: Default{},
			origCfg: Config{
				"module": "module",
			},
			expectedCfg: Config{
				"name":                "module",
				"module":              "module",
				"update_every":        module.UpdateEvery,
				"autodetection_retry": module.AutoDetectionRetry,
				"priority":            module.Priority,
			},
		},
		"clean name": {
			def: Default{},
			origCfg: Config{
				"name":   "na me",
				"module": "module",
			},
			expectedCfg: Config{
				"name":                "na_me",
				"module":              "module",
				"update_every":        module.UpdateEvery,
				"autodetection_retry": module.AutoDetectionRetry,
				"priority":            module.Priority,
			},
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			// Apply mutates the config in place.
			test.origCfg.Apply(test.def)

			assert.Equal(t, test.expectedCfg, test.origCfg)
		})
	}
}
diff --git a/agent/job/confgroup/registry.go b/agent/job/confgroup/registry.go
new file mode 100644
index 0000000..7c4ef90
--- /dev/null
+++ b/agent/job/confgroup/registry.go
@@ -0,0 +1,21 @@
+package confgroup
+
// Registry maps a module name to its default job settings.
type Registry map[string]Default

// Default holds per-module default job settings.
type Default struct {
	MinUpdateEvery     int `yaml:"-"`
	UpdateEvery        int `yaml:"update_every"`
	AutoDetectionRetry int `yaml:"autodetection_retry"`
	Priority           int `yaml:"priority"`
}

// Register stores defaults under name; an empty name is silently ignored.
func (r Registry) Register(name string, def Default) {
	if name == "" {
		return
	}
	r[name] = def
}

// Lookup returns the defaults registered for name and whether they exist.
func (r Registry) Lookup(name string) (Default, bool) {
	def, ok := r[name]
	return def, ok
}
diff --git a/agent/job/confgroup/registry_test.go b/agent/job/confgroup/registry_test.go
new file mode 100644
index 0000000..81af492
--- /dev/null
+++ b/agent/job/confgroup/registry_test.go
@@ -0,0 +1,42 @@
+package confgroup
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRegistry_Register(t *testing.T) {
+ name := "module"
+ defaults := Default{
+ MinUpdateEvery: 1,
+ UpdateEvery: 1,
+ AutoDetectionRetry: 1,
+ Priority: 1,
+ }
+ expected := Registry{
+ name: defaults,
+ }
+
+ actual := Registry{}
+ actual.Register(name, defaults)
+
+ assert.Equal(t, expected, actual)
+}
+
+func TestRegistry_Lookup(t *testing.T) {
+ name := "module"
+ expected := Default{
+ MinUpdateEvery: 1,
+ UpdateEvery: 1,
+ AutoDetectionRetry: 1,
+ Priority: 1,
+ }
+ reg := Registry{}
+ reg.Register(name, expected)
+
+ actual, ok := reg.Lookup("module")
+
+ assert.True(t, ok)
+ assert.Equal(t, expected, actual)
+}
diff --git a/agent/job/discovery/cache.go b/agent/job/discovery/cache.go
new file mode 100644
index 0000000..1c3a3e8
--- /dev/null
+++ b/agent/job/discovery/cache.go
@@ -0,0 +1,36 @@
+package discovery
+
+import (
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+)
+
// cache stores the most recent config group per source.
type cache map[string]*confgroup.Group

// newCache returns an empty, ready-to-use cache.
func newCache() *cache {
	return &cache{}
}
+
+func (c cache) update(groups []*confgroup.Group) {
+ if len(groups) == 0 {
+ return
+ }
+ for _, group := range groups {
+ if group != nil {
+ c[group.Source] = group
+ }
+ }
+}
+
+func (c cache) reset() {
+ for key := range c {
+ delete(c, key)
+ }
+}
+
+func (c cache) groups() []*confgroup.Group {
+ groups := make([]*confgroup.Group, 0, len(c))
+ for _, group := range c {
+ groups = append(groups, group)
+ }
+ return groups
+}
diff --git a/agent/job/discovery/dummy/discovery.go b/agent/job/discovery/dummy/discovery.go
new file mode 100644
index 0000000..01534c3
--- /dev/null
+++ b/agent/job/discovery/dummy/discovery.go
@@ -0,0 +1,84 @@
+package dummy
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/logger"
+)
+
+type Config struct {
+ Registry confgroup.Registry
+ Names []string
+}
+
+func validateConfig(cfg Config) error {
+ if len(cfg.Registry) == 0 {
+ return errors.New("empty config registry")
+ }
+ if len(cfg.Names) == 0 {
+ return errors.New("names not set")
+ }
+ return nil
+}
+
// Discovery generates one config group per registered module name.
type Discovery struct {
	*logger.Logger
	reg   confgroup.Registry
	names []string
}

// NewDiscovery creates a Discovery from cfg; it fails when cfg is invalid.
func NewDiscovery(cfg Config) (*Discovery, error) {
	if err := validateConfig(cfg); err != nil {
		return nil, fmt.Errorf("config validation: %v", err)
	}
	d := &Discovery{
		reg:    cfg.Registry,
		names:  cfg.Names,
		Logger: logger.New("discovery", "dummy"),
	}
	return d, nil
}

// String implements fmt.Stringer.
func (d Discovery) String() string {
	return "dummy discovery"
}

// Run sends the generated config groups once (unless ctx is canceled first)
// and then closes in to signal that no further updates will follow.
func (d Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
	d.Info("instance is started")
	defer func() { d.Info("instance is stopped") }()

	select {
	case <-ctx.Done():
	case in <- d.groups():
	}
	close(in)
}
+
+func (d Discovery) groups() (groups []*confgroup.Group) {
+ for _, name := range d.names {
+ groups = append(groups, d.newCfgGroup(name))
+ }
+ return groups
+}
+
// newCfgGroup builds a single-config group for the named module, applying the
// module's registered defaults. It returns nil for modules not in the registry.
func (d Discovery) newCfgGroup(name string) *confgroup.Group {
	def, ok := d.reg.Lookup(name)
	if !ok {
		return nil
	}

	cfg := confgroup.Config{}
	cfg.SetModule(name)
	cfg.SetSource(name)
	cfg.SetProvider("dummy")
	cfg.Apply(def)

	group := &confgroup.Group{
		Configs: []confgroup.Config{cfg},
		Source:  name,
	}
	return group
}
diff --git a/agent/job/discovery/dummy/discovery_test.go b/agent/job/discovery/dummy/discovery_test.go
new file mode 100644
index 0000000..35a0e58
--- /dev/null
+++ b/agent/job/discovery/dummy/discovery_test.go
@@ -0,0 +1,110 @@
+package dummy
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewDiscovery(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ wantErr bool
+ }{
+ "valid config": {
+ cfg: Config{
+ Registry: confgroup.Registry{"module1": confgroup.Default{}},
+ Names: []string{"module1", "module2"},
+ },
+ },
+ "invalid config, registry not set": {
+ cfg: Config{
+ Names: []string{"module1", "module2"},
+ },
+ wantErr: true,
+ },
+ "invalid config, names not set": {
+ cfg: Config{
+ Names: []string{"module1", "module2"},
+ },
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ d, err := NewDiscovery(test.cfg)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.NotNil(t, d)
+ }
+ })
+ }
+}
+
+func TestDiscovery_Run(t *testing.T) {
+ expected := []*confgroup.Group{
+ {
+ Source: "module1",
+ Configs: []confgroup.Config{
+ {
+ "name": "module1",
+ "module": "module1",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": "module1",
+ "__provider__": "dummy",
+ },
+ },
+ },
+ {
+ Source: "module2",
+ Configs: []confgroup.Config{
+ {
+ "name": "module2",
+ "module": "module2",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": "module2",
+ "__provider__": "dummy",
+ },
+ },
+ },
+ }
+
+ reg := confgroup.Registry{
+ "module1": {},
+ "module2": {},
+ }
+ cfg := Config{
+ Registry: reg,
+ Names: []string{"module1", "module2"},
+ }
+
+ discovery, err := NewDiscovery(cfg)
+ require.NoError(t, err)
+
+ in := make(chan []*confgroup.Group)
+ timeout := time.Second * 2
+
+ go discovery.Run(context.Background(), in)
+
+ var actual []*confgroup.Group
+ select {
+ case actual = <-in:
+ case <-time.After(timeout):
+ t.Logf("discovery timed out after %s", timeout)
+ }
+ assert.Equal(t, expected, actual)
+}
diff --git a/agent/job/discovery/file/discovery.go b/agent/job/discovery/file/discovery.go
new file mode 100644
index 0000000..16fc454
--- /dev/null
+++ b/agent/job/discovery/file/discovery.go
@@ -0,0 +1,106 @@
+package file
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/logger"
+)
+
// Config is the file discovery configuration.
type Config struct {
	Registry confgroup.Registry
	Read     []string // paths handled by the read discoverer
	Watch    []string // path patterns handled by the watch discoverer
}

// validateConfig checks that cfg carries a registry and at least one path.
func validateConfig(cfg Config) error {
	if len(cfg.Registry) == 0 {
		return errors.New("empty config registry")
	}
	if len(cfg.Read)+len(cfg.Watch) == 0 {
		return errors.New("discoverers not set")
	}
	return nil
}

type (
	// discoverer is anything that can produce config group updates.
	discoverer interface {
		Run(ctx context.Context, in chan<- []*confgroup.Group)
	}
	// Discovery fans in updates from its registered file discoverers.
	Discovery struct {
		discoverers []discoverer
		*logger.Logger
	}
)
+
// NewDiscovery creates a file Discovery from cfg; it fails when cfg is
// invalid or no discoverer can be registered.
func NewDiscovery(cfg Config) (*Discovery, error) {
	if err := validateConfig(cfg); err != nil {
		return nil, fmt.Errorf("file discovery config validation: %v", err)
	}

	d := Discovery{
		Logger: logger.New("discovery", "file manager"),
	}
	if err := d.registerDiscoverers(cfg); err != nil {
		return nil, fmt.Errorf("file discovery initialization: %v", err)
	}
	return &d, nil
}

// String implements fmt.Stringer.
func (d Discovery) String() string {
	return fmt.Sprintf("file discovery: %v", d.discoverers)
}

// registerDiscoverers creates a reader for cfg.Read and a watcher for
// cfg.Watch (whichever are non-empty) and requires at least one of them.
func (d *Discovery) registerDiscoverers(cfg Config) error {
	if len(cfg.Read) != 0 {
		d.discoverers = append(d.discoverers, NewReader(cfg.Registry, cfg.Read))
	}
	if len(cfg.Watch) != 0 {
		d.discoverers = append(d.discoverers, NewWatcher(cfg.Registry, cfg.Watch))
	}
	if len(d.discoverers) == 0 {
		return errors.New("zero registered discoverers")
	}
	return nil
}
+
// Run starts every registered discoverer and forwards their updates to in.
// It returns only after ctx is canceled, even when all discoverers finish
// earlier (the final <-ctx.Done() keeps the instance alive).
func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
	d.Info("instance is started")
	defer func() { d.Info("instance is stopped") }()

	var wg sync.WaitGroup

	for _, dd := range d.discoverers {
		wg.Add(1)
		go func(dd discoverer) {
			defer wg.Done()
			d.runDiscoverer(ctx, dd, in)
		}(dd)
	}

	wg.Wait()
	<-ctx.Done()
}

// runDiscoverer pumps updates from a single discoverer into in until the
// discoverer closes its channel or ctx is canceled. Cancellation is checked
// both while receiving and while forwarding so a slow consumer cannot block
// shutdown.
func (d *Discovery) runDiscoverer(ctx context.Context, dd discoverer, in chan<- []*confgroup.Group) {
	updates := make(chan []*confgroup.Group)
	go dd.Run(ctx, updates)
	for {
		select {
		case <-ctx.Done():
			return
		case groups, ok := <-updates:
			if !ok {
				return
			}
			select {
			case <-ctx.Done():
				return
			case in <- groups:
			}
		}
	}
}
diff --git a/agent/job/discovery/file/discovery_test.go b/agent/job/discovery/file/discovery_test.go
new file mode 100644
index 0000000..a455900
--- /dev/null
+++ b/agent/job/discovery/file/discovery_test.go
@@ -0,0 +1,23 @@
+package file
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
// TODO: tech debt
func TestNewDiscovery(t *testing.T) {

}

// TODO: tech debt
func TestDiscovery_Run(t *testing.T) {

}

// prepareDiscovery creates a Discovery from cfg, failing the test on error.
func prepareDiscovery(t *testing.T, cfg Config) *Discovery {
	d, err := NewDiscovery(cfg)
	require.NoError(t, err)
	return d
}
diff --git a/agent/job/discovery/file/parse.go b/agent/job/discovery/file/parse.go
new file mode 100644
index 0000000..25a1e04
--- /dev/null
+++ b/agent/job/discovery/file/parse.go
@@ -0,0 +1,131 @@
+package file
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+
+ "gopkg.in/yaml.v2"
+)
+
// format is the detected layout of a config file.
type format int

const (
	unknownFormat      format = iota // content is not recognizable YAML
	unknownEmptyFormat               // valid YAML that decodes to nothing
	staticFormat                     // top-level mapping: per-module config with jobs
	sdFormat                         // top-level sequence: service-discovery job list
)
+
// parse reads the config file at path and converts it to a config group.
// It returns (nil, nil) for files that are empty or decode to nothing, and
// an error when the content does not match any known format.
func parse(req confgroup.Registry, path string) (*confgroup.Group, error) {
	bs, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	if len(bs) == 0 {
		return nil, nil
	}

	switch cfgFormat(bs) {
	case staticFormat:
		return parseStaticFormat(req, path, bs)
	case sdFormat:
		return parseSDFormat(req, path, bs)
	case unknownEmptyFormat:
		// Comments-only/empty YAML: not an error, just nothing to do.
		return nil, nil
	default:
		return nil, fmt.Errorf("unknown file format: '%s'", path)
	}
}
+
+func parseStaticFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.Group, error) {
+ name := fileName(path)
+ modDef, ok := reg.Lookup(name)
+ if !ok {
+ return nil, nil
+ }
+
+ var modCfg staticConfig
+ if err := yaml.Unmarshal(bs, &modCfg); err != nil {
+ return nil, err
+ }
+ for _, cfg := range modCfg.Jobs {
+ cfg.SetModule(name)
+ def := mergeDef(modCfg.Default, modDef)
+ cfg.Apply(def)
+ }
+ group := &confgroup.Group{
+ Configs: modCfg.Jobs,
+ Source: path,
+ }
+ return group, nil
+}
+
// parseSDFormat parses a service-discovery style file: a flat list of job
// configs, each carrying its own module name. Configs whose module is empty
// or not registered are filtered out.
func parseSDFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.Group, error) {
	var cfgs sdConfig
	if err := yaml.Unmarshal(bs, &cfgs); err != nil {
		return nil, err
	}

	// In-place compaction: i is the write index of kept configs.
	var i int
	for _, cfg := range cfgs {
		if def, ok := reg.Lookup(cfg.Module()); ok && cfg.Module() != "" {
			cfg.Apply(def)
			cfgs[i] = cfg
			i++
		}
	}

	group := &confgroup.Group{
		Configs: cfgs[:i],
		Source:  path,
	}
	return group, nil
}
+
// cfgFormat detects the file layout from its decoded YAML shape: a top-level
// mapping is the static (per-module) format, a top-level sequence is the
// service-discovery format.
func cfgFormat(bs []byte) format {
	var data interface{}
	if err := yaml.Unmarshal(bs, &data); err != nil {
		return unknownFormat
	}
	if data == nil {
		// Valid YAML that decodes to nothing (empty document, comments only).
		return unknownEmptyFormat
	}

	type (
		// Shapes produced by yaml.v2 for untyped unmarshaling.
		static = map[interface{}]interface{}
		sd     = []interface{}
	)
	switch data.(type) {
	case static:
		return staticFormat
	case sd:
		return sdFormat
	default:
		return unknownFormat
	}
}
+
+func mergeDef(a, b confgroup.Default) confgroup.Default {
+ return confgroup.Default{
+ MinUpdateEvery: firstPositive(a.MinUpdateEvery, b.MinUpdateEvery),
+ UpdateEvery: firstPositive(a.UpdateEvery, b.UpdateEvery),
+ AutoDetectionRetry: firstPositive(a.AutoDetectionRetry, b.AutoDetectionRetry),
+ Priority: firstPositive(a.Priority, b.Priority),
+ }
+}
+
// firstPositive returns the first positive value among value and others;
// when none is positive it returns the last value examined.
func firstPositive(value int, others ...int) int {
	current := value
	for _, v := range others {
		if current > 0 {
			return current
		}
		current = v
	}
	return current
}
+
// fileName returns the final path element without its extension,
// e.g. "/etc/netdata/go.d/nginx.conf" -> "nginx".
func fileName(path string) string {
	_, file := filepath.Split(path)
	// filepath.Ext only looks at the final element, so Ext(file) == Ext(path).
	return file[:len(file)-len(filepath.Ext(file))]
}
diff --git a/agent/job/discovery/file/parse_test.go b/agent/job/discovery/file/parse_test.go
new file mode 100644
index 0000000..fb8f245
--- /dev/null
+++ b/agent/job/discovery/file/parse_test.go
@@ -0,0 +1,400 @@
+package file
+
+import (
+ "testing"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParse(t *testing.T) {
+ const (
+ jobDef = 11
+ cfgDef = 22
+ modDef = 33
+ )
+ tests := map[string]func(t *testing.T, tmp *tmpDir){
+ "static, default: +job +conf +module": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ UpdateEvery: cfgDef,
+ AutoDetectionRetry: cfgDef,
+ Priority: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "static, default: +job +conf +module (merge all)": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ AutoDetectionRetry: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ "update_every": jobDef,
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": cfgDef,
+ "priority": modDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "static, default: -job +conf +module": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ UpdateEvery: cfgDef,
+ AutoDetectionRetry: cfgDef,
+ Priority: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": cfgDef,
+ "autodetection_retry": cfgDef,
+ "priority": cfgDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "static, default: -job -conf +module": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "autodetection_retry": modDef,
+ "priority": modDef,
+ "update_every": modDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "static, default: -job -conf -module (+global)": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := staticConfig{
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "update_every": module.UpdateEvery,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "sd, default: +job +module": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "module": "sd_module",
+ "name": "name",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "sd, default: -job +module": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "sd_module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": modDef,
+ "autodetection_retry": modDef,
+ "priority": modDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "sd, default: -job -module (+global)": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "sd_module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "sd, job has no 'module' or 'module' is empty": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{},
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "conf registry has no module": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{},
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ "empty file": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+
+ filename := tmp.createFile("empty-*")
+ group, err := parse(reg, filename)
+
+ assert.Nil(t, group)
+ require.NoError(t, err)
+ },
+ "only comments, unknown empty format": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{}
+
+ filename := tmp.createFile("unknown-empty-format-*")
+ tmp.writeString(filename, "# a comment")
+ group, err := parse(reg, filename)
+
+ assert.Nil(t, group)
+ assert.NoError(t, err)
+ },
+ "unknown format": func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{}
+
+ filename := tmp.createFile("unknown-format-*")
+ tmp.writeYAML(filename, "unknown")
+ group, err := parse(reg, filename)
+
+ assert.Nil(t, group)
+ assert.Error(t, err)
+ },
+ }
+
+ for name, scenario := range tests {
+ t.Run(name, func(t *testing.T) {
+ tmp := newTmpDir(t, "parse-file-*")
+ defer tmp.cleanup()
+ scenario(t, tmp)
+ })
+ }
+}
diff --git a/agent/job/discovery/file/read.go b/agent/job/discovery/file/read.go
new file mode 100644
index 0000000..f24522c
--- /dev/null
+++ b/agent/job/discovery/file/read.go
@@ -0,0 +1,79 @@
+package file
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/logger"
+)
+
type (
	// staticConfig is the on-disk layout of a per-module config file:
	// top-level defaults (inlined into the document root) plus a list of
	// job configurations under the "jobs" key.
	staticConfig struct {
		confgroup.Default `yaml:",inline"`
		Jobs []confgroup.Config `yaml:"jobs"`
	}
	// sdConfig is the service discovery layout: a bare YAML list of job
	// configurations, each carrying its own 'module' field.
	sdConfig []confgroup.Config
)
+
// Reader is a one-shot discoverer: it reads config files matched by its
// glob patterns once and emits the resulting config groups.
type Reader struct {
	reg   confgroup.Registry // module name => module defaults
	paths []string           // glob patterns of files to read
	*logger.Logger
}
+
// NewReader creates a one-shot file reader discoverer over the given glob
// patterns, completing parsed jobs with defaults from reg.
func NewReader(reg confgroup.Registry, paths []string) *Reader {
	return &Reader{
		reg:    reg,
		paths:  paths,
		Logger: logger.New("discovery", "file reader"),
	}
}
+
// String returns the discoverer name; used in logs and error messages.
func (r Reader) String() string {
	return "file reader"
}
+
// Run reads all configured files once, sends the discovered groups to in
// (unless ctx is cancelled first), and then closes in to signal that this
// discoverer has finished and will produce no more updates.
func (r Reader) Run(ctx context.Context, in chan<- []*confgroup.Group) {
	r.Info("instance is started")
	defer func() { r.Info("instance is stopped") }()

	select {
	case <-ctx.Done():
	case in <- r.groups():
	}
	// One-shot discoverer: closing the channel lets the consumer stop
	// waiting for further updates from it.
	close(in)
}
+
// groups loads and parses every regular file matched by the reader's glob
// patterns, returning one confgroup.Group per successfully processed file.
// Parse failures are logged and skipped; an empty/comment-only file yields
// a group with no configs.
func (r Reader) groups() (groups []*confgroup.Group) {
	for _, pattern := range r.paths {
		matches, err := filepath.Glob(pattern)
		if err != nil {
			// Malformed pattern; nothing can match it.
			continue
		}

		for _, path := range matches {
			if fi, err := os.Stat(path); err != nil || !fi.Mode().IsRegular() {
				// Skip directories and anything that cannot be stat'ed.
				continue
			}

			group, err := parse(r.reg, path)
			if err != nil {
				r.Warningf("parse '%s': %v", path, err)
				continue
			}
			if group == nil {
				// parse returns nil for empty files: represent that as
				// a group without configs.
				group = &confgroup.Group{Source: path}
			}
			groups = append(groups, group)
		}
	}
	// Stamp every config with its origin for bookkeeping downstream.
	for _, group := range groups {
		for _, cfg := range group.Configs {
			cfg.SetSource(group.Source)
			cfg.SetProvider("file reader")
		}
	}
	return groups
}
diff --git a/agent/job/discovery/file/read_test.go b/agent/job/discovery/file/read_test.go
new file mode 100644
index 0000000..65abd9d
--- /dev/null
+++ b/agent/job/discovery/file/read_test.go
@@ -0,0 +1,97 @@
+package file
+
+import (
+ "testing"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestReader_String(t *testing.T) {
+ assert.NotEmpty(t, NewReader(confgroup.Registry{}, nil))
+}
+
+func TestNewReader(t *testing.T) {
+ tests := map[string]struct {
+ reg confgroup.Registry
+ paths []string
+ }{
+ "empty inputs": {
+ reg: confgroup.Registry{},
+ paths: []string{},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) { assert.NotNil(t, NewReader(test.reg, test.paths)) })
+ }
+}
+
+func TestReader_Run(t *testing.T) {
+ tmp := newTmpDir(t, "reader-run-*")
+ defer tmp.cleanup()
+
+ module1 := tmp.join("module1.conf")
+ module2 := tmp.join("module2.conf")
+ module3 := tmp.join("module3.conf")
+
+ tmp.writeYAML(module1, staticConfig{
+ Jobs: []confgroup.Config{{"name": "name"}},
+ })
+ tmp.writeYAML(module2, staticConfig{
+ Jobs: []confgroup.Config{{"name": "name"}},
+ })
+ tmp.writeString(module3, "# a comment")
+
+ reg := confgroup.Registry{
+ "module1": {},
+ "module2": {},
+ "module3": {},
+ }
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Read: []string{module1, module2, module3},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: module1,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module1",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": module1,
+ "__provider__": "file reader",
+ },
+ },
+ },
+ {
+ Source: module2,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module2",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": module2,
+ "__provider__": "file reader",
+ },
+ },
+ },
+ {
+ Source: module3,
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ expectedGroups: expected,
+ }
+
+ sim.run(t)
+}
diff --git a/agent/job/discovery/file/sim_test.go b/agent/job/discovery/file/sim_test.go
new file mode 100644
index 0000000..ba88044
--- /dev/null
+++ b/agent/job/discovery/file/sim_test.go
@@ -0,0 +1,129 @@
+package file
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+type (
+ discoverySim struct {
+ discovery *Discovery
+ beforeRun func()
+ afterRun func()
+ expectedGroups []*confgroup.Group
+ }
+)
+
+func (sim discoverySim) run(t *testing.T) {
+ t.Helper()
+ require.NotNil(t, sim.discovery)
+
+ if sim.beforeRun != nil {
+ sim.beforeRun()
+ }
+
+ in, out := make(chan []*confgroup.Group), make(chan []*confgroup.Group)
+ go sim.collectGroups(t, in, out)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+ defer cancel()
+ go sim.discovery.Run(ctx, in)
+ time.Sleep(time.Millisecond * 250)
+
+ if sim.afterRun != nil {
+ sim.afterRun()
+ }
+
+ actual := <-out
+
+ sortGroups(actual)
+ sortGroups(sim.expectedGroups)
+
+ assert.Equal(t, sim.expectedGroups, actual)
+}
+
+func (sim discoverySim) collectGroups(t *testing.T, in, out chan []*confgroup.Group) {
+ timeout := time.Second * 5
+ var groups []*confgroup.Group
+loop:
+ for {
+ select {
+ case updates := <-in:
+ if groups = append(groups, updates...); len(groups) >= len(sim.expectedGroups) {
+ break loop
+ }
+ case <-time.After(timeout):
+ t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events are skipped",
+ sim.discovery.discoverers, timeout, len(groups), len(sim.expectedGroups))
+ break loop
+ }
+ }
+ out <- groups
+}
+
+type tmpDir struct {
+ dir string
+ t *testing.T
+}
+
+func newTmpDir(t *testing.T, pattern string) *tmpDir {
+ pattern = "netdata-go-test-discovery-file-" + pattern
+ dir, err := ioutil.TempDir(os.TempDir(), pattern)
+ require.NoError(t, err)
+ return &tmpDir{dir: dir, t: t}
+}
+
+func (d *tmpDir) cleanup() {
+ assert.NoError(d.t, os.RemoveAll(d.dir))
+}
+
+func (d *tmpDir) join(filename string) string {
+ return filepath.Join(d.dir, filename)
+}
+
+func (d *tmpDir) createFile(pattern string) string {
+ f, err := ioutil.TempFile(d.dir, pattern)
+ require.NoError(d.t, err)
+ _ = f.Close()
+ return f.Name()
+}
+
+func (d *tmpDir) removeFile(filename string) {
+ err := os.Remove(filename)
+ require.NoError(d.t, err)
+}
+
+func (d *tmpDir) renameFile(origFilename, newFilename string) {
+ err := os.Rename(origFilename, newFilename)
+ require.NoError(d.t, err)
+}
+
+func (d *tmpDir) writeYAML(filename string, in interface{}) {
+ bs, err := yaml.Marshal(in)
+ require.NoError(d.t, err)
+ err = ioutil.WriteFile(filename, bs, 0644)
+ require.NoError(d.t, err)
+}
+
+func (d *tmpDir) writeString(filename, data string) {
+ err := ioutil.WriteFile(filename, []byte(data), 0644)
+ require.NoError(d.t, err)
+}
+
+func sortGroups(groups []*confgroup.Group) {
+ if len(groups) == 0 {
+ return
+ }
+ sort.Slice(groups, func(i, j int) bool { return groups[i].Source < groups[j].Source })
+}
diff --git a/agent/job/discovery/file/watch.go b/agent/job/discovery/file/watch.go
new file mode 100644
index 0000000..254d1bf
--- /dev/null
+++ b/agent/job/discovery/file/watch.go
@@ -0,0 +1,221 @@
+package file
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/logger"
+
+ "github.com/fsnotify/fsnotify"
+)
+
type (
	// Watcher watches config files matched by glob paths via fsnotify and
	// republishes config groups whenever a file is added, changed or
	// removed; a periodic refresh acts as a safety net for missed events.
	Watcher struct {
		paths        []string           // glob patterns to watch
		reg          confgroup.Registry // module name => module defaults
		watcher      *fsnotify.Watcher  // created lazily in Run
		cache        cache              // path => mod time at last parse
		refreshEvery time.Duration      // periodic full rescan interval
		*logger.Logger
	}
	// cache maps a file path to its modification time when it was last
	// parsed; used to skip unchanged files and to detect deletions.
	cache map[string]time.Time
)
+
// lookup returns the recorded mod time for path and whether it is cached.
func (c cache) lookup(path string) (time.Time, bool) { v, ok := c[path]; return v, ok }

// has reports whether path is present in the cache.
func (c cache) has(path string) bool { _, ok := c.lookup(path); return ok }

// remove drops path from the cache; a no-op when absent.
func (c cache) remove(path string) { delete(c, path) }

// put records (or refreshes) the mod time for path.
func (c cache) put(path string, modTime time.Time) { c[path] = modTime }
+
// NewWatcher creates a file watcher discoverer that monitors the given glob
// paths for config file changes. The fsnotify watcher itself is created in
// Run, not here.
func NewWatcher(reg confgroup.Registry, paths []string) *Watcher {
	d := &Watcher{
		paths:        paths,
		reg:          reg,
		watcher:      nil, // initialized in Run
		cache:        make(cache),
		refreshEvery: time.Minute,
		Logger:       logger.New("discovery", "file watcher"),
	}
	return d
}
+
// String returns the discoverer name; used in logs and error messages.
func (w Watcher) String() string {
	return "file watcher"
}
+
// Run starts the fsnotify watcher, performs an initial refresh (which also
// registers the watched directories), and then keeps sending updated config
// groups to in on file events and on a periodic timer until ctx is
// cancelled.
func (w *Watcher) Run(ctx context.Context, in chan<- []*confgroup.Group) {
	w.Info("instance is started")
	defer func() { w.Info("instance is stopped") }()

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		w.Errorf("fsnotify watcher initialization: %v", err)
		return
	}

	w.watcher = watcher
	defer w.stop()
	w.refresh(ctx, in)

	// Periodic refresh is a safety net for events fsnotify may miss.
	tk := time.NewTicker(w.refreshEvery)
	defer tk.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-tk.C:
			w.refresh(ctx, in)
		case event := <-w.watcher.Events:
			// Ignore pure chmod events and files outside our patterns.
			if event.Name == "" || isChmod(event) || !w.fileMatches(event.Name) {
				break
			}
			if isCreate(event) && w.cache.has(event.Name) {
				// vim "backupcopy=no" case, already collected after Rename event.
				break
			}
			if isRename(event) {
				// It is common to modify files using vim.
				// When writing to a file a backup is made. "backupcopy" option tells how it's done.
				// Default is "no": rename the file and write a new one.
				// This is cheap attempt to not send empty group for the old file.
				time.Sleep(time.Millisecond * 100)
			}
			w.refresh(ctx, in)
		case err := <-w.watcher.Errors:
			if err != nil {
				w.Warningf("watch: %v", err)
			}
		}
	}
}
+
+func (w *Watcher) fileMatches(file string) bool {
+ for _, pattern := range w.paths {
+ if ok, _ := filepath.Match(pattern, file); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func (w *Watcher) listFiles() (files []string) {
+ for _, pattern := range w.paths {
+ if matches, err := filepath.Glob(pattern); err == nil {
+ files = append(files, matches...)
+ }
+ }
+ return files
+}
+
// refresh rescans all matched files: new or changed files (by mod time) are
// re-parsed, files that disappeared since the last scan produce empty groups
// so the consumer removes their jobs, and the resulting updates are sent.
// It also (re-)registers the watched directories.
func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) {
	select {
	case <-ctx.Done():
		return
	default:
	}
	var groups []*confgroup.Group
	seen := make(map[string]bool)

	for _, file := range w.listFiles() {
		fi, err := os.Lstat(file)
		if err != nil {
			w.Warningf("lstat '%s': %v", file, err)
			continue
		}

		if !fi.Mode().IsRegular() {
			continue
		}

		seen[file] = true
		// Unchanged since the last parse => nothing to send for this file.
		if v, ok := w.cache.lookup(file); ok && v.Equal(fi.ModTime()) {
			continue
		}
		w.cache.put(file, fi.ModTime())

		if group, err := parse(w.reg, file); err != nil {
			w.Warningf("parse '%s': %v", file, err)
		} else if group == nil {
			// Empty/comment-only file: an empty group clears its jobs.
			groups = append(groups, &confgroup.Group{Source: file})
		} else {
			groups = append(groups, group)
		}
	}

	// Cached files we did not see this scan were removed: emit empty
	// groups so their jobs are dropped downstream.
	for name := range w.cache {
		if seen[name] {
			continue
		}
		w.cache.remove(name)
		groups = append(groups, &confgroup.Group{Source: name})
	}

	// Stamp every config with its origin for bookkeeping downstream.
	for _, group := range groups {
		for _, cfg := range group.Configs {
			cfg.SetSource(group.Source)
			cfg.SetProvider("file watcher")
		}
	}

	send(ctx, in, groups)
	w.watchDirs()
}
+
+func (w *Watcher) watchDirs() {
+ for _, path := range w.paths {
+ if idx := strings.LastIndex(path, "/"); idx > -1 {
+ path = path[:idx]
+ } else {
+ path = "./"
+ }
+ if err := w.watcher.Add(path); err != nil {
+ w.Errorf("start watching '%s': %v", path, err)
+ }
+ }
+}
+
// stop closes the fsnotify watcher. A helper goroutine keeps draining the
// Events and Errors channels while Close runs, and is released via the
// cancel deferred here once Close has returned.
func (w *Watcher) stop() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// closing the watcher deadlocks unless all events and errors are drained.
	go func() {
		for {
			select {
			case <-w.watcher.Errors:
			case <-w.watcher.Events:
			case <-ctx.Done():
				return
			}
		}
	}()

	// in fact never returns an error
	_ = w.watcher.Close()
}
+
// isChmod reports whether the event is exactly a Chmod and nothing else:
// Op XOR Chmod is zero only when Op == Chmod (no other op bits set).
func isChmod(event fsnotify.Event) bool {
	return event.Op^fsnotify.Chmod == 0
}

// isRename reports whether the Rename bit is set on the event.
func isRename(event fsnotify.Event) bool {
	return event.Op&fsnotify.Rename == fsnotify.Rename
}

// isCreate reports whether the Create bit is set on the event.
func isCreate(event fsnotify.Event) bool {
	return event.Op&fsnotify.Create == fsnotify.Create
}
+
+func send(ctx context.Context, in chan<- []*confgroup.Group, groups []*confgroup.Group) {
+ if len(groups) == 0 {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ case in <- groups:
+ }
+}
diff --git a/agent/job/discovery/file/watch_test.go b/agent/job/discovery/file/watch_test.go
new file mode 100644
index 0000000..fd33b05
--- /dev/null
+++ b/agent/job/discovery/file/watch_test.go
@@ -0,0 +1,350 @@
+package file
+
+import (
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWatcher_String(t *testing.T) {
+ assert.NotEmpty(t, NewWatcher(confgroup.Registry{}, nil))
+}
+
+func TestNewWatcher(t *testing.T) {
+ tests := map[string]struct {
+ reg confgroup.Registry
+ paths []string
+ }{
+ "empty inputs": {
+ reg: confgroup.Registry{},
+ paths: []string{},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) { assert.NotNil(t, NewWatcher(test.reg, test.paths)) })
+ }
+}
+
+func TestWatcher_Run(t *testing.T) {
+ tests := map[string]func(tmp *tmpDir) discoverySim{
+ "file exists before start": func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": filename,
+ "__provider__": "file watcher",
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "empty file": func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeString(filename, "")
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "only comments, no data": func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeString(filename, "# a comment")
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "add file": func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": filename,
+ "__provider__": "file watcher",
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ afterRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "remove file": func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": filename,
+ "__provider__": "file watcher",
+ },
+ },
+ },
+ {
+ Source: filename,
+ Configs: nil,
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ afterRun: func() {
+ tmp.removeFile(filename)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "change file": func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfgOrig := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ cfgChanged := sdConfig{
+ {
+ "name": "name_changed",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": filename,
+ "__provider__": "file watcher",
+ },
+ },
+ },
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name_changed",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": filename,
+ "__provider__": "file watcher",
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfgOrig)
+ },
+ afterRun: func() {
+ tmp.writeYAML(filename, cfgChanged)
+ time.Sleep(time.Millisecond * 500)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "vim 'backupcopy=no' (writing to a file and backup)": func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": filename,
+ "__provider__": "file watcher",
+ },
+ },
+ },
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__source__": filename,
+ "__provider__": "file watcher",
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ afterRun: func() {
+ newFilename := filename + ".swp"
+ tmp.renameFile(filename, newFilename)
+ tmp.writeYAML(filename, cfg)
+ tmp.removeFile(newFilename)
+ time.Sleep(time.Millisecond * 500)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ }
+
+ for name, createSim := range tests {
+ t.Run(name, func(t *testing.T) {
+ tmp := newTmpDir(t, "watch-run-*")
+ defer tmp.cleanup()
+
+ createSim(tmp).run(t)
+ })
+ }
+}
diff --git a/agent/job/discovery/manager.go b/agent/job/discovery/manager.go
new file mode 100644
index 0000000..002e5f0
--- /dev/null
+++ b/agent/job/discovery/manager.go
@@ -0,0 +1,196 @@
+package discovery
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/job/discovery/dummy"
+ "github.com/netdata/go.d.plugin/agent/job/discovery/file"
+ "github.com/netdata/go.d.plugin/logger"
+)
+
// Config configures the discovery Manager: a registry of known modules plus
// the settings of each supported discoverer (file, dummy).
type Config struct {
	Registry confgroup.Registry
	File     file.Config
	Dummy    dummy.Config
}
+
+func validateConfig(cfg Config) error {
+ if len(cfg.Registry) == 0 {
+ return errors.New("empty config registry")
+ }
+ if len(cfg.File.Read)+len(cfg.File.Watch) == 0 && len(cfg.Dummy.Names) == 0 {
+ return errors.New("discoverers not set")
+ }
+ return nil
+}
+
type (
	// discoverer is anything that publishes config group updates to a
	// channel until its context is cancelled.
	discoverer interface {
		Run(ctx context.Context, in chan<- []*confgroup.Group)
	}
	// Manager fans in updates from all registered discoverers, aggregates
	// them in a cache and periodically forwards the accumulated groups.
	Manager struct {
		*logger.Logger
		discoverers []discoverer
		send        chan struct{} // signal: the cache has unsent changes
		sendEvery   time.Duration // minimum interval between flushes
		mux         *sync.RWMutex // guards cache
		cache       *cache
	}
)
+
+func NewManager(cfg Config) (*Manager, error) {
+ if err := validateConfig(cfg); err != nil {
+ return nil, fmt.Errorf("discovery manager config validation: %v", err)
+ }
+ mgr := &Manager{
+ send: make(chan struct{}, 1),
+ sendEvery: time.Second * 2, // some timeout to aggregate changes
+ discoverers: make([]discoverer, 0),
+ mux: &sync.RWMutex{},
+ cache: newCache(),
+ Logger: logger.New("discovery", "manager"),
+ }
+ if err := mgr.registerDiscoverers(cfg); err != nil {
+ return nil, fmt.Errorf("discovery manager initializaion: %v", err)
+ }
+ return mgr, nil
+}
+
// String returns a description of the manager and its discoverers for logs.
func (m Manager) String() string {
	return fmt.Sprintf("discovery manager: %v", m.discoverers)
}
+
// registerDiscoverers instantiates a discoverer for every configured part of
// cfg (file read/watch, dummy) and appends it to m.discoverers. It is an
// error when no discoverer ends up registered.
func (m *Manager) registerDiscoverers(cfg Config) error {
	if len(cfg.File.Read) > 0 || len(cfg.File.Watch) > 0 {
		// The file discoverer needs the registry to apply module defaults.
		cfg.File.Registry = cfg.Registry
		d, err := file.NewDiscovery(cfg.File)
		if err != nil {
			return err
		}
		m.discoverers = append(m.discoverers, d)
	}

	if len(cfg.Dummy.Names) > 0 {
		cfg.Dummy.Registry = cfg.Registry
		d, err := dummy.NewDiscovery(cfg.Dummy)
		if err != nil {
			return err
		}
		m.discoverers = append(m.discoverers, d)
	}

	if len(m.discoverers) == 0 {
		return errors.New("zero registered discoverers")
	}
	m.Infof("registered discoverers: %v", m.discoverers)
	return nil
}
+
// Run starts every registered discoverer plus the send loop, then blocks
// until they all return and ctx is cancelled.
func (m *Manager) Run(ctx context.Context, in chan<- []*confgroup.Group) {
	m.Info("instance is started")
	defer func() { m.Info("instance is stopped") }()

	var wg sync.WaitGroup

	for _, d := range m.discoverers {
		wg.Add(1)
		// d is passed as an argument to avoid loop-variable capture.
		go func(d discoverer) {
			defer wg.Done()
			m.runDiscoverer(ctx, d)
		}(d)
	}

	wg.Add(1)
	go func() {
		defer wg.Done()
		m.sendLoop(ctx, in)
	}()

	wg.Wait()
	// Even if every goroutine returned early, stay alive until cancellation
	// so the instance lifetime is driven by ctx.
	<-ctx.Done()
}
+
// runDiscoverer consumes updates from a single discoverer, merging each
// batch into the shared cache (under the lock) and scheduling a send. It
// returns when the discoverer closes its channel or ctx is cancelled.
func (m *Manager) runDiscoverer(ctx context.Context, d discoverer) {
	updates := make(chan []*confgroup.Group)
	go d.Run(ctx, updates)

	for {
		select {
		case <-ctx.Done():
			return
		case groups, ok := <-updates:
			if !ok {
				// Discoverer finished (e.g. the one-shot file reader).
				return
			}
			func() {
				m.mux.Lock()
				defer m.mux.Unlock()

				m.cache.update(groups)
				m.triggerSend()
			}()
		}
	}
}
+
// sendLoop forwards accumulated groups to in. The very first batch is sent
// as soon as it is ready (mustSend); afterwards changes are aggregated and
// flushed at most once per sendEvery tick.
func (m *Manager) sendLoop(ctx context.Context, in chan<- []*confgroup.Group) {
	m.mustSend(ctx, in)

	tk := time.NewTicker(m.sendEvery)
	defer tk.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-tk.C:
			// Flush only when something changed since the last send.
			select {
			case <-m.send:
				m.trySend(in)
			default:
			}
		}
	}
}
+
// mustSend blocks until the first change notification arrives, then delivers
// the cached groups to in (unless ctx is cancelled first). The cache is
// snapshotted and reset under the lock before sending.
func (m *Manager) mustSend(ctx context.Context, in chan<- []*confgroup.Group) {
	select {
	case <-ctx.Done():
		return
	case <-m.send:
		m.mux.Lock()
		groups := m.cache.groups()
		m.cache.reset()
		m.mux.Unlock()

		// Send outside the lock so discoverers are not blocked.
		select {
		case <-ctx.Done():
		case in <- groups:
		}
		return
	}
}
+
// trySend attempts a non-blocking delivery of the cached groups. When the
// consumer is not ready, the cache is kept intact and the send is re-armed
// for the next tick via triggerSend.
func (m *Manager) trySend(in chan<- []*confgroup.Group) {
	m.mux.Lock()
	defer m.mux.Unlock()

	select {
	case in <- m.cache.groups():
		m.cache.reset()
	default:
		m.triggerSend()
	}
}
+
// triggerSend marks that the cache has unsent changes. The send channel has
// capacity 1, so repeated triggers coalesce and never block the caller.
func (m *Manager) triggerSend() {
	select {
	case m.send <- struct{}{}:
	default:
	}
}
diff --git a/agent/job/discovery/manager_test.go b/agent/job/discovery/manager_test.go
new file mode 100644
index 0000000..44dfbab
--- /dev/null
+++ b/agent/job/discovery/manager_test.go
@@ -0,0 +1,175 @@
+package discovery
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/agent/job/discovery/file"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestNewManager checks that NewManager rejects configs lacking a registry
+// or any discoverer source, and succeeds otherwise.
+func TestNewManager(t *testing.T) {
+	tests := map[string]struct {
+		cfg     Config
+		wantErr bool
+	}{
+		"valid config": {
+			cfg: Config{
+				Registry: confgroup.Registry{"module1": confgroup.Default{}},
+				File:     file.Config{Read: []string{"path"}},
+			},
+		},
+		"invalid config, registry not set": {
+			cfg: Config{
+				File: file.Config{Read: []string{"path"}},
+			},
+			wantErr: true,
+		},
+		"invalid config, discoverers not set": {
+			cfg: Config{
+				Registry: confgroup.Registry{"module1": confgroup.Default{}},
+			},
+			wantErr: true,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			mgr, err := NewManager(test.cfg)
+
+			if test.wantErr {
+				assert.Error(t, err)
+			} else {
+				require.NoError(t, err)
+				assert.NotNil(t, mgr)
+			}
+		})
+	}
+}
+
+// TestManager_Run drives the manager with mock discoverers through the
+// discoverySim harness and compares the delivered groups with expectations.
+func TestManager_Run(t *testing.T) {
+	tests := map[string]func() discoverySim{
+		"several discoverers, unique groups with delayed collect": func() discoverySim {
+			const numGroups, numCfgs = 2, 2
+			d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+			d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+			mgr := prepareManager(d1, d2)
+			expected := combineGroups(d1.groups, d2.groups)
+
+			sim := discoverySim{
+				mgr: mgr,
+				// delay > sendEvery forces the ticker-driven (non-initial) send path
+				collectDelay:   mgr.sendEvery + time.Second,
+				expectedGroups: expected,
+			}
+			return sim
+		},
+		"several discoverers, unique groups": func() discoverySim {
+			const numGroups, numCfgs = 2, 2
+			d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+			d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+			mgr := prepareManager(d1, d2)
+			expected := combineGroups(d1.groups, d2.groups)
+			sim := discoverySim{
+				mgr:            mgr,
+				expectedGroups: expected,
+			}
+			return sim
+		},
+		"several discoverers, same groups": func() discoverySim {
+			const numGroups, numTargets = 2, 2
+			d1 := prepareMockDiscoverer("test1", numGroups, numTargets)
+			// the same discoverer registered twice: cache should deduplicate
+			mgr := prepareManager(d1, d1)
+			expected := combineGroups(d1.groups)
+
+			sim := discoverySim{
+				mgr:            mgr,
+				expectedGroups: expected,
+			}
+			return sim
+		},
+		"several discoverers, empty groups": func() discoverySim {
+			const numGroups, numCfgs = 1, 0
+			d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+			d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+			mgr := prepareManager(d1, d2)
+			expected := combineGroups(d1.groups, d2.groups)
+
+			sim := discoverySim{
+				mgr:            mgr,
+				expectedGroups: expected,
+			}
+			return sim
+		},
+		"several discoverers, nil groups": func() discoverySim {
+			const numGroups, numCfgs = 0, 0
+			d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+			d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+			mgr := prepareManager(d1, d2)
+
+			sim := discoverySim{
+				mgr:            mgr,
+				expectedGroups: nil,
+			}
+			return sim
+		},
+	}
+
+	for name, sim := range tests {
+		t.Run(name, func(t *testing.T) { sim().run(t) })
+	}
+}
+
+func prepareMockDiscoverer(source string, groups, configs int) mockDiscoverer {
+ d := mockDiscoverer{}
+
+ for i := 0; i < groups; i++ {
+ group := confgroup.Group{
+ Source: fmt.Sprintf("%s_group_%d", source, i+1),
+ }
+ for j := 0; j < configs; j++ {
+ group.Configs = append(group.Configs,
+ confgroup.Config{"name": fmt.Sprintf("%s_group_%d_target_%d", source, i+1, j+1)})
+ }
+ d.groups = append(d.groups, &group)
+ }
+ return d
+}
+
+// prepareManager builds a Manager wired with the given discoverers and a
+// short sendEvery so the tests do not wait long for the ticker path.
+func prepareManager(discoverers ...discoverer) *Manager {
+	mgr := &Manager{
+		send:        make(chan struct{}, 1),
+		sendEvery:   2 * time.Second,
+		discoverers: discoverers,
+		cache:       newCache(),
+		mux:         &sync.RWMutex{},
+	}
+	return mgr
+}
+
+// mockDiscoverer is a discoverer stub that emits its predefined groups once.
+type mockDiscoverer struct {
+	groups []*confgroup.Group
+}
+
+// Run sends the predefined groups once, or gives up when ctx is cancelled.
+func (md mockDiscoverer) Run(ctx context.Context, out chan<- []*confgroup.Group) {
+	// Both branches of the original select returned immediately, so the
+	// surrounding for-loop could never iterate twice; a bare select is
+	// behaviorally identical and clearer.
+	select {
+	case <-ctx.Done():
+	case out <- md.groups:
+	}
+}
+
+// combineGroups flattens the given group slices into a single slice,
+// preserving order.
+func combineGroups(groups ...[]*confgroup.Group) (combined []*confgroup.Group) {
+	for _, set := range groups {
+		for _, grp := range set {
+			combined = append(combined, grp)
+		}
+	}
+	return combined
+}
diff --git a/agent/job/discovery/sim_test.go b/agent/job/discovery/sim_test.go
new file mode 100644
index 0000000..f64fa54
--- /dev/null
+++ b/agent/job/discovery/sim_test.go
@@ -0,0 +1,66 @@
+package discovery
+
+import (
+ "context"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// discoverySim describes one discovery-manager scenario: the manager under
+// test, an optional delay before the consumer starts reading, and the groups
+// the manager is expected to deliver.
+type discoverySim struct {
+	mgr            *Manager
+	collectDelay   time.Duration
+	expectedGroups []*confgroup.Group
+}
+
+// run starts the manager and a collector goroutine, then compares the groups
+// the collector gathered against expectedGroups (order-insensitive: both
+// sides are sorted by Source before comparison).
+func (sim discoverySim) run(t *testing.T) {
+	t.Helper()
+	require.NotNil(t, sim.mgr)
+
+	in, out := make(chan []*confgroup.Group), make(chan []*confgroup.Group)
+	go sim.collectGroups(t, in, out)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go sim.mgr.Run(ctx, in)
+
+	actualGroups := <-out
+
+	sortGroups(sim.expectedGroups)
+	sortGroups(actualGroups)
+
+	assert.Equal(t, sim.expectedGroups, actualGroups)
+}
+
+// collectGroups reads batches from in (after an optional startup delay) until
+// it has at least as many groups as expected or a timeout elapses, then
+// forwards everything gathered on out.
+func (sim discoverySim) collectGroups(t *testing.T, in, out chan []*confgroup.Group) {
+	time.Sleep(sim.collectDelay)
+
+	timeout := sim.mgr.sendEvery + time.Second*2
+	var groups []*confgroup.Group
+loop:
+	for {
+		select {
+		case inGroups := <-in:
+			if groups = append(groups, inGroups...); len(groups) >= len(sim.expectedGroups) {
+				break loop
+			}
+		case <-time.After(timeout):
+			// not fatal: run() will report the mismatch via the assertion
+			t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events are skipped",
+				sim.mgr.discoverers, timeout, len(groups), len(sim.expectedGroups))
+			break loop
+		}
+	}
+	out <- groups
+}
+
+// sortGroups orders groups by Source so slices can be compared deterministically.
+func sortGroups(groups []*confgroup.Group) {
+	if len(groups) == 0 {
+		return
+	}
+	bySource := func(i, j int) bool { return groups[i].Source < groups[j].Source }
+	sort.Slice(groups, bySource)
+}
diff --git a/agent/job/job.go b/agent/job/job.go
new file mode 100644
index 0000000..b8a0402
--- /dev/null
+++ b/agent/job/job.go
@@ -0,0 +1,13 @@
+package job
+
+// Job is the contract the agent uses to manage a data-collection job:
+// identity (Name/ModuleName/FullName), auto-detection policy, and the
+// run/tick/stop lifecycle.
+type Job interface {
+	Name() string
+	ModuleName() string
+	FullName() string
+	AutoDetection() bool
+	AutoDetectionEvery() int
+	RetryAutoDetection() bool
+	Tick(clock int)
+	Start()
+	Stop()
+}
diff --git a/agent/job/mock.go b/agent/job/mock.go
new file mode 100644
index 0000000..7a06d52
--- /dev/null
+++ b/agent/job/mock.go
@@ -0,0 +1,82 @@
+package job
+
+// MockJob is a configurable Job test double: each XxxFunc field, when set,
+// overrides the corresponding method; unset funcs fall back to harmless
+// defaults ("mock", zero, true, or no-op).
+type MockJob struct {
+	FullNameFunc           func() string
+	ModuleNameFunc         func() string
+	NameFunc               func() string
+	AutoDetectionFunc      func() bool
+	AutoDetectionEveryFunc func() int
+	RetryAutoDetectionFunc func() bool
+	TickFunc               func(int)
+	StartFunc              func()
+	StopFunc               func()
+}
+
+// FullName returns mock job full name.
+func (m MockJob) FullName() string {
+	if m.FullNameFunc == nil {
+		return "mock"
+	}
+	return m.FullNameFunc()
+}
+
+// ModuleName returns mock job module name.
+func (m MockJob) ModuleName() string {
+	if m.ModuleNameFunc == nil {
+		return "mock"
+	}
+	return m.ModuleNameFunc()
+}
+
+// Name returns mock job name.
+func (m MockJob) Name() string {
+	if m.NameFunc == nil {
+		return "mock"
+	}
+	return m.NameFunc()
+}
+
+// AutoDetectionEvery returns mock job AutoDetectionEvery.
+func (m MockJob) AutoDetectionEvery() int {
+	if m.AutoDetectionEveryFunc == nil {
+		return 0
+	}
+	return m.AutoDetectionEveryFunc()
+}
+
+// AutoDetection returns mock job AutoDetection.
+func (m MockJob) AutoDetection() bool {
+	if m.AutoDetectionFunc == nil {
+		return true
+	}
+	return m.AutoDetectionFunc()
+}
+
+// RetryAutoDetection invokes mock job RetryAutoDetection.
+func (m MockJob) RetryAutoDetection() bool {
+	if m.RetryAutoDetectionFunc == nil {
+		return true
+	}
+	return m.RetryAutoDetectionFunc()
+}
+
+// Tick invokes mock job Tick.
+func (m MockJob) Tick(clock int) {
+	if m.TickFunc != nil {
+		m.TickFunc(clock)
+	}
+}
+
+// Start invokes mock job Start.
+func (m MockJob) Start() {
+	if m.StartFunc != nil {
+		m.StartFunc()
+	}
+}
+
+// Stop invokes mock job Stop.
+func (m MockJob) Stop() {
+	if m.StopFunc != nil {
+		m.StopFunc()
+	}
+}
diff --git a/agent/job/mock_test.go b/agent/job/mock_test.go
new file mode 100644
index 0000000..915bac2
--- /dev/null
+++ b/agent/job/mock_test.go
@@ -0,0 +1,79 @@
+package job
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestMockJob_FullName: default value first, then the injected override.
+func TestMockJob_FullName(t *testing.T) {
+	m := &MockJob{}
+	expected := "name"
+
+	assert.NotEqual(t, expected, m.FullName())
+	m.FullNameFunc = func() string { return expected }
+	assert.Equal(t, expected, m.FullName())
+}
+
+// TestMockJob_ModuleName: default value first, then the injected override.
+func TestMockJob_ModuleName(t *testing.T) {
+	m := &MockJob{}
+	expected := "name"
+
+	assert.NotEqual(t, expected, m.ModuleName())
+	m.ModuleNameFunc = func() string { return expected }
+	assert.Equal(t, expected, m.ModuleName())
+}
+
+// TestMockJob_Name: default value first, then the injected override.
+func TestMockJob_Name(t *testing.T) {
+	m := &MockJob{}
+	expected := "name"
+
+	assert.NotEqual(t, expected, m.Name())
+	m.NameFunc = func() string { return expected }
+	assert.Equal(t, expected, m.Name())
+}
+
+// TestMockJob_AutoDetectionEvery: default value first, then the injected override.
+func TestMockJob_AutoDetectionEvery(t *testing.T) {
+	m := &MockJob{}
+	expected := -1
+
+	assert.NotEqual(t, expected, m.AutoDetectionEvery())
+	m.AutoDetectionEveryFunc = func() int { return expected }
+	assert.Equal(t, expected, m.AutoDetectionEvery())
+}
+
+// TestMockJob_RetryAutoDetection checks the default (true) and verifies that
+// an installed override is actually invoked by flipping the result to false.
+// The original asserted True both before and after setting a func returning
+// true, which could never detect an ignored override.
+func TestMockJob_RetryAutoDetection(t *testing.T) {
+	m := &MockJob{}
+
+	assert.True(t, m.RetryAutoDetection())
+	m.RetryAutoDetectionFunc = func() bool { return false }
+	assert.False(t, m.RetryAutoDetection())
+}
+
+// TestMockJob_AutoDetection checks the default (true) and verifies that an
+// installed override is actually invoked by flipping the result to false.
+func TestMockJob_AutoDetection(t *testing.T) {
+	m := &MockJob{}
+
+	assert.True(t, m.AutoDetection())
+	m.AutoDetectionFunc = func() bool { return false }
+	assert.False(t, m.AutoDetection())
+}
+
+func TestMockJob_Tick(t *testing.T) {
+ m := &MockJob{}
+
+ assert.NotPanics(t, func() { m.Tick(1) })
+}
+
+func TestMockJob_Start(t *testing.T) {
+ m := &MockJob{}
+
+ assert.NotPanics(t, func() { m.Start() })
+}
+
+func TestMockJob_Stop(t *testing.T) {
+ m := &MockJob{}
+
+ assert.NotPanics(t, func() { m.Stop() })
+}
diff --git a/agent/job/registry/registry.go b/agent/job/registry/registry.go
new file mode 100644
index 0000000..5a586b1
--- /dev/null
+++ b/agent/job/registry/registry.go
@@ -0,0 +1,47 @@
+package registry
+
+import (
+ "path/filepath"
+
+ "github.com/gofrs/flock"
+)
+
+// FileLockRegistry tracks per-collector file locks inside Dir so that only
+// one process collects a given data source at a time.
+// NOTE(review): the locks map is accessed without synchronization — callers
+// appear expected to use the registry from a single goroutine; confirm.
+type FileLockRegistry struct {
+	Dir   string
+	locks map[string]*flock.Flock
+}
+
+// NewFileLockRegistry creates a registry that keeps its lock files in dir.
+func NewFileLockRegistry(dir string) *FileLockRegistry {
+	return &FileLockRegistry{
+		Dir:   dir,
+		locks: make(map[string]*flock.Flock),
+	}
+}
+
+// suffix is appended to every lock file name.
+const suffix = ".collector.lock"
+
+// Register tries to acquire the file lock for name. It returns true if the
+// lock is held by this registry (idempotent for repeated names), false when
+// another process holds it; err is non-nil only on filesystem errors.
+func (r *FileLockRegistry) Register(name string) (bool, error) {
+	name = filepath.Join(r.Dir, name+suffix)
+	if _, ok := r.locks[name]; ok {
+		return true, nil
+	}
+
+	locker := flock.New(name)
+	ok, err := locker.TryLock()
+	if ok {
+		r.locks[name] = locker
+	} else {
+		// release the handle; we never obtained the lock
+		_ = locker.Close()
+	}
+	return ok, err
+}
+
+// Unregister releases the file lock for name; a no-op for unknown names.
+func (r *FileLockRegistry) Unregister(name string) error {
+	name = filepath.Join(r.Dir, name+suffix)
+	locker, ok := r.locks[name]
+	if !ok {
+		return nil
+	}
+	delete(r.locks, name)
+	return locker.Close()
+}
diff --git a/agent/job/registry/registry_test.go b/agent/job/registry/registry_test.go
new file mode 100644
index 0000000..466d53c
--- /dev/null
+++ b/agent/job/registry/registry_test.go
@@ -0,0 +1,95 @@
+package registry
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestNewFileLockRegistry checks the constructor returns a usable registry.
+func TestNewFileLockRegistry(t *testing.T) {
+	assert.NotNil(t, NewFileLockRegistry(""))
+}
+
+// TestFileLockRegistry_Register exercises Register against a temp directory:
+// fresh lock, idempotent re-register, cross-registry contention, bad dir.
+func TestFileLockRegistry_Register(t *testing.T) {
+	tests := map[string]func(t *testing.T, dir string){
+		"register a lock": func(t *testing.T, dir string) {
+			reg := NewFileLockRegistry(dir)
+
+			ok, err := reg.Register("name")
+			assert.True(t, ok)
+			assert.NoError(t, err)
+		},
+		"register the same lock twice": func(t *testing.T, dir string) {
+			reg := NewFileLockRegistry(dir)
+
+			ok, err := reg.Register("name")
+			require.True(t, ok)
+			require.NoError(t, err)
+
+			ok, err = reg.Register("name")
+			assert.True(t, ok)
+			assert.NoError(t, err)
+		},
+		"failed to register locked by other process lock": func(t *testing.T, dir string) {
+			reg1 := NewFileLockRegistry(dir)
+			reg2 := NewFileLockRegistry(dir)
+
+			ok, err := reg1.Register("name")
+			require.True(t, ok)
+			require.NoError(t, err)
+
+			ok, err = reg2.Register("name")
+			assert.False(t, ok)
+			assert.NoError(t, err)
+		},
+		"failed to register because a directory doesnt exist": func(t *testing.T, dir string) {
+			// dir+dir is guaranteed not to exist
+			reg := NewFileLockRegistry(dir + dir)
+
+			ok, err := reg.Register("name")
+			assert.False(t, ok)
+			assert.Error(t, err)
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			dir, err := ioutil.TempDir(os.TempDir(), "netdata-go-test-file-lock-registry")
+			require.NoError(t, err)
+			defer func() { require.NoError(t, os.RemoveAll(dir)) }()
+
+			test(t, dir)
+		})
+	}
+}
+
+// TestFileLockRegistry_Unregister checks releasing held and unknown locks.
+func TestFileLockRegistry_Unregister(t *testing.T) {
+	tests := map[string]func(t *testing.T, dir string){
+		"unregister a lock": func(t *testing.T, dir string) {
+			reg := NewFileLockRegistry(dir)
+
+			ok, err := reg.Register("name")
+			require.True(t, ok)
+			require.NoError(t, err)
+
+			assert.NoError(t, reg.Unregister("name"))
+		},
+		"unregister not registered lock": func(t *testing.T, dir string) {
+			reg := NewFileLockRegistry(dir)
+
+			assert.NoError(t, reg.Unregister("name"))
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			dir, err := ioutil.TempDir(os.TempDir(), "netdata-go-test-file-lock-registry")
+			require.NoError(t, err)
+			defer func() { require.NoError(t, os.RemoveAll(dir)) }()
+
+			test(t, dir)
+		})
+	}
+}
diff --git a/agent/job/run/run.go b/agent/job/run/run.go
new file mode 100644
index 0000000..f2bff8e
--- /dev/null
+++ b/agent/job/run/run.go
@@ -0,0 +1,99 @@
+package run
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ jobpkg "github.com/netdata/go.d.plugin/agent/job"
+ "github.com/netdata/go.d.plugin/agent/ticker"
+ "github.com/netdata/go.d.plugin/logger"
+)
+
+type (
+	// Manager ticks all running jobs once a second; the mutex guards the
+	// job queue shared between Run's ticker goroutine and Start/Stop callers.
+	Manager struct {
+		mux   sync.Mutex
+		queue queue
+		*logger.Logger
+	}
+	queue []jobpkg.Job
+)
+
+// NewManager creates a job run manager with an empty queue.
+func NewManager() *Manager {
+	return &Manager{
+		mux:    sync.Mutex{},
+		Logger: logger.New("run", "manager"),
+	}
+}
+
+// Run ticks every queued job once per second until ctx is cancelled.
+func (m *Manager) Run(ctx context.Context) {
+	m.Info("instance is started")
+	defer func() { m.Info("instance is stopped") }()
+
+	tk := ticker.New(time.Second)
+	defer tk.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case clock := <-tk.C:
+			m.Debugf("tick %d", clock)
+			m.notify(clock)
+		}
+	}
+}
+
+// Start starts a job and adds it to the job queue.
+func (m *Manager) Start(job jobpkg.Job) {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	go job.Start()
+	m.queue.add(job)
+}
+
+// Stop stops a job and removes it from the job queue.
+func (m *Manager) Stop(fullName string) {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	if job := m.queue.remove(fullName); job != nil {
+		job.Stop()
+	}
+}
+
+// Cleanup stops all jobs in the queue.
+// NOTE(review): unlike Stop/notify, Cleanup does not take the mutex —
+// presumably only called after Run has exited; confirm with callers.
+func (m *Manager) Cleanup() {
+	for _, v := range m.queue {
+		v.Stop()
+	}
+	m.queue = m.queue[:0]
+}
+
+// notify delivers the current clock tick to every queued job.
+func (m *Manager) notify(clock int) {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	for _, v := range m.queue {
+		v.Tick(clock)
+	}
+}
+
+// add appends a job to the queue.
+func (q *queue) add(job jobpkg.Job) {
+	*q = append(*q, job)
+}
+
+// remove deletes the first job whose FullName matches and returns it,
+// or nil when not found. The freed tail slot is nil'ed so the job can be GC'd.
+func (q *queue) remove(fullName string) jobpkg.Job {
+	for idx, v := range *q {
+		if v.FullName() != fullName {
+			continue
+		}
+		j := (*q)[idx]
+		copy((*q)[idx:], (*q)[idx+1:])
+		(*q)[len(*q)-1] = nil
+		*q = (*q)[:len(*q)-1]
+		return j
+	}
+	return nil
+}
diff --git a/agent/job/run/run_test.go b/agent/job/run/run_test.go
new file mode 100644
index 0000000..2c430a2
--- /dev/null
+++ b/agent/job/run/run_test.go
@@ -0,0 +1,13 @@
+package run
+
+import "testing"
+
+// TODO: tech debt — run.NewManager has no coverage yet.
+func TestNewManager(t *testing.T) {
+
+}
+
+// TODO: tech debt — Manager.Run has no coverage yet.
+func TestManager_Run(t *testing.T) {
+
+}
diff --git a/agent/job/state/state.go b/agent/job/state/state.go
new file mode 100644
index 0000000..986d31f
--- /dev/null
+++ b/agent/job/state/state.go
@@ -0,0 +1,159 @@
+package state
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+ "github.com/netdata/go.d.plugin/logger"
+)
+
+// Manager periodically persists job states to a JSON file at path.
+// flushCh (capacity 1) coalesces any number of pending changes into one flush.
+type Manager struct {
+	path    string
+	store   *Store
+	flushCh chan struct{}
+	*logger.Logger
+}
+
+// NewManager creates a state-save manager writing to path.
+func NewManager(path string) *Manager {
+	return &Manager{
+		store:   &Store{},
+		path:    path,
+		flushCh: make(chan struct{}, 1),
+		Logger:  logger.New("state save", "manager"),
+	}
+}
+
+// Run flushes pending state changes every 5 seconds (at most) until ctx is
+// cancelled; one final flush happens on shutdown via the deferred call.
+func (m *Manager) Run(ctx context.Context) {
+	m.Info("instance is started")
+	defer func() { m.Info("instance is stopped") }()
+
+	tk := time.NewTicker(time.Second * 5)
+	defer tk.Stop()
+	defer m.flush()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-tk.C:
+			// flush only when something changed since the last tick
+			select {
+			case <-m.flushCh:
+				m.flush()
+			default:
+			}
+		}
+	}
+}
+
+// Save records the state for cfg and schedules a flush when it changed.
+// NOTE(review): lookup and add are separate lock acquisitions, so two
+// concurrent Saves for the same cfg can race benignly (last write wins).
+func (m *Manager) Save(cfg confgroup.Config, state string) {
+	if st, ok := m.store.lookup(cfg); !ok || state != st {
+		m.store.add(cfg, state)
+		m.triggerFlush()
+	}
+}
+
+// Remove deletes the state for cfg and schedules a flush if it existed.
+func (m *Manager) Remove(cfg confgroup.Config) {
+	if _, ok := m.store.lookup(cfg); ok {
+		m.store.remove(cfg)
+		m.triggerFlush()
+	}
+}
+
+// triggerFlush raises the flush flag without blocking (capacity-1 channel).
+func (m *Manager) triggerFlush() {
+	select {
+	case m.flushCh <- struct{}{}:
+	default:
+	}
+}
+
+// flush writes the serialized store to m.path.
+// NOTE(review): marshal/create/write errors are silently dropped — state
+// saving is best-effort; consider logging them.
+func (m *Manager) flush() {
+	bs, err := m.store.bytes()
+	if err != nil {
+		return
+	}
+	f, err := os.Create(m.path)
+	if err != nil {
+		return
+	}
+	defer f.Close()
+	_, _ = f.Write(bs)
+}
+
+// Store is a mutex-guarded two-level map of job states:
+// module name -> "jobName:hash" -> state string.
+type Store struct {
+	mux   sync.Mutex
+	items map[string]map[string]string // [module][name:hash]state
+}
+
+// Contains reports whether cfg's stored state equals any of states.
+func (s *Store) Contains(cfg confgroup.Config, states ...string) bool {
+	state, ok := s.lookup(cfg)
+	if !ok {
+		return false
+	}
+	for _, v := range states {
+		if state == v {
+			return true
+		}
+	}
+	return false
+}
+
+// lookup returns the stored state for cfg and whether it exists.
+func (s *Store) lookup(cfg confgroup.Config) (string, bool) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+
+	v, ok := s.items[cfg.Module()]
+	if !ok {
+		return "", false
+	}
+	state, ok := v[storeKey(cfg)]
+	return state, ok
+}
+
+// add stores (or overwrites) the state for cfg, lazily creating maps.
+func (s *Store) add(cfg confgroup.Config, state string) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+
+	if s.items == nil {
+		s.items = make(map[string]map[string]string)
+	}
+	if s.items[cfg.Module()] == nil {
+		s.items[cfg.Module()] = make(map[string]string)
+	}
+	s.items[cfg.Module()][storeKey(cfg)] = state
+}
+
+// remove deletes cfg's state, dropping the module entry when it empties.
+func (s *Store) remove(cfg confgroup.Config) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+
+	delete(s.items[cfg.Module()], storeKey(cfg))
+	if len(s.items[cfg.Module()]) == 0 {
+		delete(s.items, cfg.Module())
+	}
+}
+
+// bytes serializes the store as indented JSON.
+func (s *Store) bytes() ([]byte, error) {
+	s.mux.Lock()
+	defer s.mux.Unlock()
+
+	return json.MarshalIndent(s.items, "", " ")
+}
+
+// Load reads a previously flushed Store from the JSON file at path.
+func Load(path string) (*Store, error) {
+	var s Store
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return &s, json.NewDecoder(f).Decode(&s.items)
+}
+
+// storeKey builds the per-job map key "name:hash" from cfg.
+func storeKey(cfg confgroup.Config) string {
+	return fmt.Sprintf("%s:%d", cfg.Name(), cfg.Hash())
+}
diff --git a/agent/job/state/state_test.go b/agent/job/state/state_test.go
new file mode 100644
index 0000000..139eb3c
--- /dev/null
+++ b/agent/job/state/state_test.go
@@ -0,0 +1,156 @@
+package state
+
+import (
+ "testing"
+
+ "github.com/netdata/go.d.plugin/agent/job/confgroup"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TODO: tech debt — state.NewManager has no coverage yet.
+func TestNewManager(t *testing.T) {
+
+}
+
+// TODO: tech debt — Manager.Run has no coverage yet.
+func TestManager_Run(t *testing.T) {
+
+}
+
+// TODO: tech debt — Manager.Save has no coverage yet.
+func TestManager_Save(t *testing.T) {
+
+}
+
+// TODO: tech debt — Manager.Remove has no coverage yet.
+func TestManager_Remove(t *testing.T) {
+
+}
+
+// TODO: tech debt — Store.Contains has no coverage yet.
+func TestState_Contains(t *testing.T) {
+
+}
+
+// TODO: tech debt — Load has no coverage yet.
+func TestLoad(t *testing.T) {
+
+}
+
+// TestStore_add checks that add deduplicates by (module, name, hash): same
+// config overwrites, while a config with extra options gets a distinct hash.
+func TestStore_add(t *testing.T) {
+	tests := map[string]struct {
+		prepare      func() *Store
+		input        confgroup.Config
+		wantItemsNum int
+	}{
+		"add a cfg to the empty store": {
+			prepare: func() *Store {
+				return &Store{}
+			},
+			input: prepareConfig(
+				"module", "modName",
+				"name", "jobName",
+			),
+			wantItemsNum: 1,
+		},
+		"add a cfg that already in the store": {
+			prepare: func() *Store {
+				return &Store{
+					items: map[string]map[string]string{
+						"modName": {"jobName:18299273693089411682": "state"},
+					},
+				}
+			},
+			input: prepareConfig(
+				"module", "modName",
+				"name", "jobName",
+			),
+			wantItemsNum: 1,
+		},
+		"add a cfg with same module, same name, but specific options": {
+			prepare: func() *Store {
+				return &Store{
+					items: map[string]map[string]string{
+						"modName": {"jobName:18299273693089411682": "state"},
+					},
+				}
+			},
+			input: prepareConfig(
+				"module", "modName",
+				"name", "jobName",
+				"opt", "val",
+			),
+			wantItemsNum: 2,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			s := test.prepare()
+			s.add(test.input, "state")
+			assert.Equal(t, test.wantItemsNum, calcItemsNum(s))
+		})
+	}
+}
+
+// TestStore_remove checks removal from an empty store is a no-op and that
+// removal only drops the entry matching the config's exact hash.
+func TestStore_remove(t *testing.T) {
+	tests := map[string]struct {
+		prepare      func() *Store
+		input        confgroup.Config
+		wantItemsNum int
+	}{
+		"remove a cfg from the empty store": {
+			prepare: func() *Store {
+				return &Store{}
+			},
+			input: prepareConfig(
+				"module", "modName",
+				"name", "jobName",
+			),
+			wantItemsNum: 0,
+		},
+		"remove a cfg from the store": {
+			prepare: func() *Store {
+				return &Store{
+					items: map[string]map[string]string{
+						"modName": {
+							"jobName:18299273693089411682": "state",
+							"jobName:18299273693089411683": "state",
+						},
+					},
+				}
+			},
+			input: prepareConfig(
+				"module", "modName",
+				"name", "jobName",
+			),
+			wantItemsNum: 1,
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			s := test.prepare()
+			s.remove(test.input)
+			assert.Equal(t, test.wantItemsNum, calcItemsNum(s))
+		})
+	}
+}
+
+// calcItemsNum returns the total number of (module, job) entries in the store.
+func calcItemsNum(s *Store) (num int) {
+	// The original inner `for range v { num += 1 }` is just len(v).
+	for _, jobs := range s.items {
+		num += len(jobs)
+	}
+	return num
+}
+
+// prepareConfig builds a Config from alternating key/value arguments;
+// a trailing unpaired value is ignored.
+func prepareConfig(values ...string) confgroup.Config {
+	cfg := confgroup.Config{}
+	for i := 0; i+1 < len(values); i += 2 {
+		key, value := values[i], values[i+1]
+		cfg[key] = value
+	}
+	return cfg
+}
diff --git a/agent/module/charts.go b/agent/module/charts.go
new file mode 100644
index 0000000..0d67c75
--- /dev/null
+++ b/agent/module/charts.go
@@ -0,0 +1,440 @@
+package module
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+type (
+	// ChartType is a netdata chart type (line/area/stacked).
+	ChartType string
+	// dimAlgo is a netdata dimension value algorithm.
+	dimAlgo string
+)
+
+const (
+	// Line chart type.
+	Line ChartType = "line"
+	// Area chart type.
+	Area ChartType = "area"
+	// Stacked chart type.
+	Stacked ChartType = "stacked"
+
+	// Absolute dimension algorithm.
+	// The value is to drawn as-is (interpolated to second boundary).
+	Absolute dimAlgo = "absolute"
+	// Incremental dimension algorithm.
+	// The value increases over time, the difference from the last value is presented in the chart,
+	// the server interpolates the value and calculates a per second figure.
+	Incremental dimAlgo = "incremental"
+	// PercentOfAbsolute dimension algorithm.
+	// The percent of this value compared to the total of all dimensions.
+	PercentOfAbsolute dimAlgo = "percentage-of-absolute-row"
+	// PercentOfIncremental dimension algorithm.
+	// The percent of this value compared to the incremental total of all dimensions
+	PercentOfIncremental dimAlgo = "percentage-of-incremental-row"
+)
+
+// String returns the algorithm name, or "" for an unknown value.
+func (d dimAlgo) String() string {
+	switch d {
+	case Absolute, Incremental, PercentOfAbsolute, PercentOfIncremental:
+		return string(d)
+	}
+	return ""
+}
+
+// String returns the chart type name, or "" for an unknown value.
+func (c ChartType) String() string {
+	switch c {
+	case Line, Area, Stacked:
+		return string(c)
+	}
+	return ""
+}
+
+type (
+	// Charts is a collection of Charts.
+	Charts []*Chart
+
+	// Opts represents chart options.
+	Opts struct {
+		Obsolete   bool
+		Detail     bool
+		StoreFirst bool
+		Hidden     bool
+	}
+
+	// Chart represents a chart.
+	// For the full description please visit https://docs.netdata.cloud/collectors/plugins.d/#chart
+	Chart struct {
+		// typeID is the unique identification of the chart, if not specified,
+		// the orchestrator will use job full name + chart ID as typeID (default behaviour).
+		typeID string
+
+		ID       string
+		OverID   string
+		Title    string
+		Units    string
+		Fam      string
+		Ctx      string
+		Type     ChartType
+		Priority int
+		Opts
+
+		Dims Dims
+		Vars Vars
+
+		Retries int
+
+		remove bool
+		// created flag is used to indicate whether the chart needs to be created by the orchestrator.
+		created bool
+		// updated flag is used to indicate whether the chart was updated on last data collection interval.
+		updated bool
+
+		// ignore flag is used to indicate that the chart shouldn't be sent to the netdata plugins.d
+		ignore bool
+	}
+
+	// DimOpts represents dimension options.
+	DimOpts struct {
+		Obsolete   bool
+		Hidden     bool
+		NoReset    bool
+		NoOverflow bool
+	}
+
+	// Dim represents a chart dimension.
+	// For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#dimension.
+	Dim struct {
+		ID   string
+		Name string
+		Algo dimAlgo
+		Mul  int
+		Div  int
+		DimOpts
+
+		remove bool
+	}
+
+	// Var represents a chart variable.
+	// For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#variable
+	Var struct {
+		ID    string
+		Value int64
+	}
+
+	// Dims is a collection of dims.
+	Dims []*Dim
+	// Vars is a collection of vars.
+	Vars []*Var
+)
+
+// String returns the chart options as a space-separated plugins.d flag list,
+// or "" when no option is set.
+func (o Opts) String() string {
+	var b strings.Builder
+	if o.Detail {
+		b.WriteString(" detail")
+	}
+	if o.Hidden {
+		b.WriteString(" hidden")
+	}
+	if o.Obsolete {
+		b.WriteString(" obsolete")
+	}
+	if o.StoreFirst {
+		b.WriteString(" store_first")
+	}
+
+	// b.Len() avoids re-materializing the string just to test emptiness
+	if b.Len() == 0 {
+		return ""
+	}
+	// drop the leading separator space
+	return b.String()[1:]
+}
+
+// String returns the dimension options as a space-separated plugins.d flag
+// list, or "" when no option is set.
+func (o DimOpts) String() string {
+	var b strings.Builder
+	if o.Hidden {
+		b.WriteString(" hidden")
+	}
+	if o.NoOverflow {
+		b.WriteString(" nooverflow")
+	}
+	if o.NoReset {
+		b.WriteString(" noreset")
+	}
+	if o.Obsolete {
+		b.WriteString(" obsolete")
+	}
+
+	// b.Len() avoids re-materializing the string just to test emptiness
+	if b.Len() == 0 {
+		return ""
+	}
+	// drop the leading separator space
+	return b.String()[1:]
+}
+
+// Add adds (appends) a variable number of Charts.
+// Adding fails if a chart is invalid or an active chart with the same ID exists.
+func (c *Charts) Add(charts ...*Chart) error {
+	for _, chart := range charts {
+		if err := checkChart(chart); err != nil {
+			return fmt.Errorf("error on adding chart : %s", err)
+		}
+		// `existing` un-shadows the loop variable from the original code
+		if existing := c.Get(chart.ID); existing != nil && !existing.remove {
+			return fmt.Errorf("error on adding chart : '%s' is already in charts", existing.ID)
+		}
+		*c = append(*c, chart)
+	}
+
+	return nil
+}
+
+// Get returns the chart by ID, or nil when not found.
+func (c Charts) Get(chartID string) *Chart {
+	idx := c.index(chartID)
+	if idx == -1 {
+		return nil
+	}
+	return c[idx]
+}
+
+// Has returns true if ChartsFunc contain the chart with the given ID, false otherwise.
+func (c Charts) Has(chartID string) bool {
+	return c.index(chartID) != -1
+}
+
+// Remove removes the chart from Charts by ID.
+// Avoid to use it in runtime (use MarkRemove instead).
+func (c *Charts) Remove(chartID string) error {
+	idx := c.index(chartID)
+	if idx == -1 {
+		return fmt.Errorf("error on removing chart : '%s' is not in charts", chartID)
+	}
+	copy((*c)[idx:], (*c)[idx+1:])
+	// nil the freed tail slot so the chart can be garbage collected
+	(*c)[len(*c)-1] = nil
+	*c = (*c)[:len(*c)-1]
+	return nil
+}
+
+// Copy returns a deep copy of ChartsFunc.
+func (c Charts) Copy() *Charts {
+	charts := Charts{}
+	for idx := range c {
+		charts = append(charts, c[idx].Copy())
+	}
+	return &charts
+}
+
+// index returns the position of the chart with the given ID, or -1.
+func (c Charts) index(chartID string) int {
+	for idx := range c {
+		if c[idx].ID == chartID {
+			return idx
+		}
+	}
+	return -1
+}
+
+// MarkNotCreated changes 'created' chart flag to false.
+// Use it to add dimension in runtime.
+func (c *Chart) MarkNotCreated() {
+	c.created = false
+}
+
+// MarkRemove sets 'remove' flag and Obsolete option to true.
+// Use it to remove chart in runtime.
+func (c *Chart) MarkRemove() {
+	c.Obsolete = true
+	c.remove = true
+}
+
+// MarkDimRemove sets 'remove' flag, Obsolete and optionally Hidden options to true.
+// Use it to remove dimension in runtime.
+func (c *Chart) MarkDimRemove(dimID string, hide bool) error {
+	if !c.HasDim(dimID) {
+		return fmt.Errorf("chart '%s' has no '%s' dimension", c.ID, dimID)
+	}
+	dim := c.GetDim(dimID)
+	dim.Obsolete = true
+	if hide {
+		dim.Hidden = true
+	}
+	dim.remove = true
+	return nil
+}
+
+// AddDim adds new dimension to the chart dimensions.
+// It fails when the dimension is invalid or its ID already exists.
+func (c *Chart) AddDim(newDim *Dim) error {
+	err := checkDim(newDim)
+	if err != nil {
+		return fmt.Errorf("error on adding dim to chart '%s' : %s", c.ID, err)
+	}
+	if c.HasDim(newDim.ID) {
+		return fmt.Errorf("error on adding dim : '%s' is already in chart '%s' dims", newDim.ID, c.ID)
+	}
+	c.Dims = append(c.Dims, newDim)
+
+	return nil
+}
+
+// AddVar adds new variable to the chart variables.
+// It fails when the variable is invalid or its ID already exists.
+func (c *Chart) AddVar(newVar *Var) error {
+	err := checkVar(newVar)
+	if err != nil {
+		return fmt.Errorf("error on adding var to chart '%s' : %s", c.ID, err)
+	}
+	if c.indexVar(newVar.ID) != -1 {
+		return fmt.Errorf("error on adding var : '%s' is already in chart '%s' vars", newVar.ID, c.ID)
+	}
+	c.Vars = append(c.Vars, newVar)
+
+	return nil
+}
+
+// GetDim returns dimension by ID, or nil when not found.
+func (c *Chart) GetDim(dimID string) *Dim {
+	idx := c.indexDim(dimID)
+	if idx == -1 {
+		return nil
+	}
+	return c.Dims[idx]
+}
+
+// RemoveDim removes dimension by ID.
+// Avoid to use it in runtime (use MarkDimRemove instead).
+func (c *Chart) RemoveDim(dimID string) error {
+	idx := c.indexDim(dimID)
+	if idx == -1 {
+		return fmt.Errorf("error on removing dim : '%s' isn't in chart '%s'", dimID, c.ID)
+	}
+	c.Dims = append(c.Dims[:idx], c.Dims[idx+1:]...)
+
+	return nil
+}
+
+// HasDim returns true if the chart contains dimension with the given ID, false otherwise.
+func (c Chart) HasDim(dimID string) bool {
+	return c.indexDim(dimID) != -1
+}
+
+// Copy returns a deep copy of the chart (dims and vars are cloned too).
+func (c Chart) Copy() *Chart {
+	chart := c
+	chart.Dims = Dims{}
+	chart.Vars = Vars{}
+
+	for idx := range c.Dims {
+		chart.Dims = append(chart.Dims, c.Dims[idx].copy())
+	}
+	for idx := range c.Vars {
+		chart.Vars = append(chart.Vars, c.Vars[idx].copy())
+	}
+
+	return &chart
+}
+
+// indexDim returns the position of the dim with the given ID, or -1.
+func (c Chart) indexDim(dimID string) int {
+	for idx := range c.Dims {
+		if c.Dims[idx].ID == dimID {
+			return idx
+		}
+	}
+	return -1
+}
+
+// indexVar returns the position of the var with the given ID, or -1.
+func (c Chart) indexVar(varID string) int {
+	for idx := range c.Vars {
+		if c.Vars[idx].ID == varID {
+			return idx
+		}
+	}
+	return -1
+}
+
+// copy returns a copy of the dim (value receiver already copies it).
+func (d Dim) copy() *Dim {
+	return &d
+}
+
+// copy returns a copy of the var (value receiver already copies it).
+func (v Var) copy() *Var {
+	return &v
+}
+
+// checkCharts validates every chart, wrapping errors with the chart ID.
+func checkCharts(charts ...*Chart) error {
+	for _, chart := range charts {
+		err := checkChart(chart)
+		if err != nil {
+			return fmt.Errorf("chart '%s' : %v", chart.ID, err)
+		}
+	}
+	return nil
+}
+
+// checkChart validates required chart fields, the ID charset, and rejects
+// duplicate dim/var IDs.
+func checkChart(chart *Chart) error {
+	if chart.ID == "" {
+		return errors.New("empty ID")
+	}
+
+	if chart.Title == "" {
+		return errors.New("empty Title")
+	}
+
+	if chart.Units == "" {
+		return errors.New("empty Units")
+	}
+
+	if id := checkID(chart.ID); id != -1 {
+		return fmt.Errorf("unacceptable symbol in ID : '%c'", id)
+	}
+
+	set := make(map[string]bool)
+
+	for _, d := range chart.Dims {
+		err := checkDim(d)
+		if err != nil {
+			return err
+		}
+		if set[d.ID] {
+			return fmt.Errorf("duplicate dim '%s'", d.ID)
+		}
+		set[d.ID] = true
+	}
+
+	set = make(map[string]bool)
+
+	for _, v := range chart.Vars {
+		if err := checkVar(v); err != nil {
+			return err
+		}
+		if set[v.ID] {
+			return fmt.Errorf("duplicate var '%s'", v.ID)
+		}
+		set[v.ID] = true
+	}
+	return nil
+}
+
+// checkDim validates a dimension's ID (non-empty, no whitespace).
+func checkDim(d *Dim) error {
+	if d.ID == "" {
+		return errors.New("empty dim ID")
+	}
+	if id := checkID(d.ID); id != -1 {
+		return fmt.Errorf("unacceptable symbol in dim ID '%s' : '%c'", d.ID, id)
+	}
+	return nil
+}
+
+// checkVar validates a variable's ID (non-empty, no whitespace).
+func checkVar(v *Var) error {
+	if v.ID == "" {
+		return errors.New("empty var ID")
+	}
+	if id := checkID(v.ID); id != -1 {
+		return fmt.Errorf("unacceptable symbol in var ID '%s' : '%c'", v.ID, id)
+	}
+	return nil
+}
+
+// checkID returns the first whitespace rune found in id (as an int),
+// or -1 when id contains none.
+func checkID(id string) int {
+	for _, r := range id {
+		if unicode.IsSpace(r) {
+			return int(r)
+		}
+	}
+	return -1
+}
diff --git a/agent/module/charts_test.go b/agent/module/charts_test.go
new file mode 100644
index 0000000..8c96f4b
--- /dev/null
+++ b/agent/module/charts_test.go
@@ -0,0 +1,378 @@
+package module
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// createTestChart builds a minimal valid chart (one dimension, one
// variable) with the given ID; used as a fixture throughout these tests.
func createTestChart(id string) *Chart {
	return &Chart{
		ID:    id,
		Title: "Title",
		Units: "units",
		Fam:   "family",
		Ctx:   "context",
		Type:  Line,
		Dims: Dims{
			{ID: "dim1", Algo: Absolute},
		},
		Vars: Vars{
			{ID: "var1", Value: 1},
		},
	}
}
+
+func TestDimAlgo_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"line", Line},
+ {"area", Area},
+ {"stacked", Stacked},
+ {"", dimAlgo("wrong")},
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestChartType_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"absolute", Absolute},
+ {"incremental", Incremental},
+ {"percentage-of-absolute-row", PercentOfAbsolute},
+ {"percentage-of-incremental-row", PercentOfIncremental},
+ {"", ChartType("wrong")},
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestOpts_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"", Opts{}},
+ {
+ "detail hidden obsolete store_first",
+ Opts{Detail: true, Hidden: true, Obsolete: true, StoreFirst: true},
+ },
+ {
+ "detail hidden obsolete store_first",
+ Opts{Detail: true, Hidden: true, Obsolete: true, StoreFirst: true},
+ },
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestDimOpts_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"", DimOpts{}},
+ {
+ "hidden nooverflow noreset obsolete",
+ DimOpts{Hidden: true, NoOverflow: true, NoReset: true, Obsolete: true},
+ },
+ {
+ "hidden obsolete",
+ DimOpts{Hidden: true, NoOverflow: false, NoReset: false, Obsolete: true},
+ },
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestCharts_Copy(t *testing.T) {
+ orig := &Charts{
+ createTestChart("1"),
+ createTestChart("2"),
+ }
+ copied := orig.Copy()
+
+ require.False(t, orig == copied, "Charts copy points to the same address")
+ require.Len(t, *orig, len(*copied))
+
+ for idx := range *orig {
+ compareCharts(t, (*orig)[idx], (*copied)[idx])
+ }
+}
+
+func TestChart_Copy(t *testing.T) {
+ orig := createTestChart("1")
+
+ compareCharts(t, orig, orig.Copy())
+}
+
+func TestCharts_Add(t *testing.T) {
+ charts := Charts{}
+ chart1 := createTestChart("1")
+ chart2 := createTestChart("2")
+ chart3 := createTestChart("")
+
+ // OK case
+ assert.NoError(t, charts.Add(
+ chart1,
+ chart2,
+ ))
+ assert.Len(t, charts, 2)
+
+ // NG case
+ assert.Error(t, charts.Add(
+ chart3,
+ chart1,
+ chart2,
+ ))
+ assert.Len(t, charts, 2)
+
+ assert.True(t, charts[0] == chart1)
+ assert.True(t, charts[1] == chart2)
+}
+
+func TestCharts_Add_SameID(t *testing.T) {
+ charts := Charts{}
+ chart1 := createTestChart("1")
+ chart2 := createTestChart("1")
+
+ assert.NoError(t, charts.Add(chart1))
+ assert.Error(t, charts.Add(chart2))
+ assert.Len(t, charts, 1)
+
+ charts = Charts{}
+ chart1 = createTestChart("1")
+ chart2 = createTestChart("1")
+
+ assert.NoError(t, charts.Add(chart1))
+ chart1.MarkRemove()
+ assert.NoError(t, charts.Add(chart2))
+ assert.Len(t, charts, 2)
+}
+
+func TestCharts_Get(t *testing.T) {
+ chart := createTestChart("1")
+ charts := Charts{
+ chart,
+ }
+
+ // OK case
+ assert.True(t, chart == charts.Get("1"))
+ // NG case
+ assert.Nil(t, charts.Get("2"))
+}
+
+func TestCharts_Has(t *testing.T) {
+ chart := createTestChart("1")
+ charts := &Charts{
+ chart,
+ }
+
+ // OK case
+ assert.True(t, charts.Has("1"))
+ // NG case
+ assert.False(t, charts.Has("2"))
+}
+
+func TestCharts_Remove(t *testing.T) {
+ chart := createTestChart("1")
+ charts := &Charts{
+ chart,
+ }
+
+ // OK case
+ assert.NoError(t, charts.Remove("1"))
+ assert.Len(t, *charts, 0)
+
+ // NG case
+ assert.Error(t, charts.Remove("2"))
+}
+
+func TestChart_AddDim(t *testing.T) {
+ chart := createTestChart("1")
+ dim := &Dim{ID: "dim2"}
+
+ // OK case
+ assert.NoError(t, chart.AddDim(dim))
+ assert.Len(t, chart.Dims, 2)
+
+ // NG case
+ assert.Error(t, chart.AddDim(dim))
+ assert.Len(t, chart.Dims, 2)
+}
+
+func TestChart_AddVar(t *testing.T) {
+ chart := createTestChart("1")
+ variable := &Var{ID: "var2"}
+
+ // OK case
+ assert.NoError(t, chart.AddVar(variable))
+ assert.Len(t, chart.Vars, 2)
+
+ // NG case
+ assert.Error(t, chart.AddVar(variable))
+ assert.Len(t, chart.Vars, 2)
+}
+
+func TestChart_GetDim(t *testing.T) {
+ chart := &Chart{
+ Dims: Dims{
+ {ID: "1"},
+ {ID: "2"},
+ },
+ }
+
+ // OK case
+ assert.True(t, chart.GetDim("1") != nil && chart.GetDim("1").ID == "1")
+
+ // NG case
+ assert.Nil(t, chart.GetDim("3"))
+}
+
+func TestChart_RemoveDim(t *testing.T) {
+ chart := createTestChart("1")
+
+ // OK case
+ assert.NoError(t, chart.RemoveDim("dim1"))
+ assert.Len(t, chart.Dims, 0)
+
+ // NG case
+ assert.Error(t, chart.RemoveDim("dim2"))
+}
+
+func TestChart_HasDim(t *testing.T) {
+ chart := createTestChart("1")
+
+ // OK case
+ assert.True(t, chart.HasDim("dim1"))
+ // NG case
+ assert.False(t, chart.HasDim("dim2"))
+}
+
+func TestChart_MarkNotCreated(t *testing.T) {
+ chart := createTestChart("1")
+
+ chart.MarkNotCreated()
+ assert.False(t, chart.created)
+}
+
+func TestChart_MarkRemove(t *testing.T) {
+ chart := createTestChart("1")
+
+ chart.MarkRemove()
+ assert.True(t, chart.remove)
+ assert.True(t, chart.Obsolete)
+}
+
+func TestChart_MarkDimRemove(t *testing.T) {
+ chart := createTestChart("1")
+
+ assert.Error(t, chart.MarkDimRemove("dim99", false))
+ assert.NoError(t, chart.MarkDimRemove("dim1", true))
+ assert.True(t, chart.GetDim("dim1").Obsolete)
+ assert.True(t, chart.GetDim("dim1").Hidden)
+ assert.True(t, chart.GetDim("dim1").remove)
+}
+
+func TestChart_check(t *testing.T) {
+ // OK case
+ chart := createTestChart("1")
+ assert.NoError(t, checkChart(chart))
+
+ // NG case
+ chart = createTestChart("1")
+ chart.ID = ""
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.ID = "invalid id"
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Title = ""
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Units = ""
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Dims = Dims{
+ {ID: "1"},
+ {ID: "1"},
+ }
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Vars = Vars{
+ {ID: "1"},
+ {ID: "1"},
+ }
+ assert.Error(t, checkChart(chart))
+}
+
+func TestDim_check(t *testing.T) {
+ // OK case
+ dim := &Dim{ID: "id"}
+ assert.NoError(t, checkDim(dim))
+
+ // NG case
+ dim = &Dim{ID: "id"}
+ dim.ID = ""
+ assert.Error(t, checkDim(dim))
+
+ dim = &Dim{ID: "id"}
+ dim.ID = "invalid id"
+ assert.Error(t, checkDim(dim))
+}
+
+func TestVar_check(t *testing.T) {
+ // OK case
+ v := &Var{ID: "id"}
+ assert.NoError(t, checkVar(v))
+
+ // NG case
+ v = &Var{ID: "id"}
+ v.ID = ""
+ assert.Error(t, checkVar(v))
+
+ v = &Var{ID: "id"}
+ v.ID = "invalid id"
+ assert.Error(t, checkVar(v))
+}
+
// compareCharts asserts that copied is a deep copy of orig: a distinct
// Chart pointer, equal Dims/Vars lengths, and per-element Dims/Vars that
// are equal in value but distinct in address.
func compareCharts(t *testing.T, orig, copied *Chart) {
	// 1. compare chart pointers
	// 2. compare Dims, Vars length
	// 3. compare Dims, Vars pointers

	assert.False(t, orig == copied, "Chart copy ChartsFunc points to the same address")

	require.Len(t, orig.Dims, len(copied.Dims))
	require.Len(t, orig.Vars, len(copied.Vars))

	for idx := range (*orig).Dims {
		assert.False(t, orig.Dims[idx] == copied.Dims[idx], "Chart copy dim points to the same address")
		assert.Equal(t, orig.Dims[idx], copied.Dims[idx], "Chart copy dim isn't equal to orig")
	}

	for idx := range (*orig).Vars {
		assert.False(t, orig.Vars[idx] == copied.Vars[idx], "Chart copy var points to the same address")
		assert.Equal(t, orig.Vars[idx], copied.Vars[idx], "Chart copy var isn't equal to orig")
	}
}
diff --git a/agent/module/job.go b/agent/module/job.go
new file mode 100644
index 0000000..dd0b110
--- /dev/null
+++ b/agent/module/job.go
@@ -0,0 +1,467 @@
+package module
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/netdataapi"
+ "github.com/netdata/go.d.plugin/logger"
+)
+
+var writeLock = &sync.Mutex{}
+
// newRuntimeChart returns the chart that reports how long each data
// collection run of this plugin takes, in milliseconds. typeID is forced
// to "netdata" so it appears under the Netdata section; ID and Title are
// filled in later (see processMetrics).
func newRuntimeChart(pluginName string) *Chart {
	return &Chart{
		typeID: "netdata",
		Units:  "ms",
		Fam:    pluginName,
		Ctx:    "netdata.go_plugin_execution_time", Priority: 145000,
		Dims: Dims{
			{ID: "time"},
		},
	}
}
+
// JobConfig is the set of parameters for building a Job via NewJob.
type JobConfig struct {
	PluginName      string    // plugin name, reported on CHART lines
	Name            string    // job name
	ModuleName      string    // name of the wrapped module
	FullName        string    // full job name, used as the default chart type ID
	Module          Module    // the data collection module to wrap
	Out             io.Writer // destination for generated netdata API output
	UpdateEvery     int       // data collection interval, seconds
	AutoDetectEvery int       // autodetection retry interval; 0 disables retries
	Priority        int       // base priority for charts without an explicit one
}

const (
	penaltyStep = 5   // failed runs per penalty increment (see Job.penalty)
	maxPenalty  = 600 // cap on the collection-delay penalty
	infTries    = -1  // sentinel: retry autodetection forever
)
+
// NewJob returns a new Job wrapping cfg.Module. Generated netdata API
// commands are buffered internally and flushed to cfg.Out once per run.
// Autodetection retries are unlimited by default (AutoDetectTries = infTries).
func NewJob(cfg JobConfig) *Job {
	var buf bytes.Buffer
	return &Job{
		pluginName:      cfg.PluginName,
		name:            cfg.Name,
		moduleName:      cfg.ModuleName,
		fullName:        cfg.FullName,
		updateEvery:     cfg.UpdateEvery,
		AutoDetectEvery: cfg.AutoDetectEvery,
		priority:        cfg.Priority,
		module:          cfg.Module,
		out:             cfg.Out,
		AutoDetectTries: infTries,
		runChart:        newRuntimeChart(cfg.PluginName),
		stop:            make(chan struct{}),
		tick:            make(chan int),
		buf:             &buf,
		api:             netdataapi.New(&buf),
	}
}
+
// Job represents a job. It's a module wrapper: it drives the module's
// init/check/collect lifecycle and converts collected metrics into
// netdata external-plugin API output.
type Job struct {
	pluginName string
	name       string
	moduleName string
	fullName   string

	updateEvery     int // data collection interval, seconds
	AutoDetectEvery int // autodetection retry interval; 0 means disabled
	AutoDetectTries int // remaining autodetection attempts; infTries is unlimited
	priority        int // next chart priority to hand out (see createChart)

	*logger.Logger

	module Module

	initialized bool // whether module.Init has succeeded (set in init)
	panicked    bool // whether the last module call panicked

	runChart *Chart  // chart reporting this job's own execution time
	charts   *Charts // charts provided by the module (set in postCheck)
	tick     chan int
	out      io.Writer
	buf      *bytes.Buffer // per-run scratch buffer, flushed to out under writeLock
	api      *netdataapi.API

	retries int       // consecutive runs in which no chart was updated
	prevRun time.Time // start time of the previous collection run

	stop chan struct{}
}

// https://github.com/netdata/netdata/blob/ab0ffcebf802803d1e88f6a5e47a314c292b45e3/database/rrd.h#L59
// Chart type.id (job.FullName() + '.' + chart.ID) must be shorter than this
// netdata limit; longer charts are ignored (see processMetrics).
const RRD_ID_LENGTH_MAX = 200
+
// FullName returns the job's full name (used as the default chart type ID).
func (j Job) FullName() string {
	return j.fullName
}

// ModuleName returns the name of the wrapped module.
func (j Job) ModuleName() string {
	return j.moduleName
}

// Name returns the job name.
func (j Job) Name() string {
	return j.name
}

// Panicked reports whether the last module call panicked.
func (j Job) Panicked() bool {
	return j.panicked
}

// AutoDetectionEvery returns the autodetection retry interval.
func (j Job) AutoDetectionEvery() int {
	return j.AutoDetectEvery
}

// RetryAutoDetection reports whether autodetection should be retried:
// retries must be enabled (AutoDetectEvery > 0) and attempts must remain
// (unlimited, or a positive count).
func (j Job) RetryAutoDetection() bool {
	return j.AutoDetectEvery > 0 && (j.AutoDetectTries == infTries || j.AutoDetectTries > 0)
}
+
// AutoDetection invokes init, check and postCheck. It handles panic.
// Any failure (including a panic) triggers module cleanup. init and
// postCheck failures — and panics — also disable further autodetection
// retries; a plain check failure leaves retries possible
// (see RetryAutoDetection).
func (j *Job) AutoDetection() (ok bool) {
	defer func() {
		if r := recover(); r != nil {
			ok = false
			j.Errorf("PANIC %v", r)
			j.panicked = true
			j.disableAutoDetection()
		}
		// cleanup on any failure path, panic or not
		if !ok {
			j.module.Cleanup()
		}
	}()

	if ok = j.init(); !ok {
		j.Error("init failed")
		j.disableAutoDetection()
		return
	}
	if ok = j.check(); !ok {
		j.Error("check failed")
		return
	}
	j.Info("check success")
	if ok = j.postCheck(); !ok {
		j.Error("postCheck failed")
		j.disableAutoDetection()
		return
	}
	return true
}
+
// Tick notifies the job about a new clock tick. The send is non-blocking:
// if the previous run has not finished yet, the tick is dropped.
func (j *Job) Tick(clock int) {
	select {
	case j.tick <- clock:
	default:
		j.Debug("skip the tick due to previous run hasn't been finished")
	}
}

// Start starts job main loop. On every received tick that is a multiple
// of updateEvery (stretched by penalty() after repeated failures) it runs
// one collection cycle. The loop exits when Stop sends on the stop
// channel; the job then cleans up and sends back to unblock Stop.
func (j *Job) Start() {
	j.Infof("started, data collection interval %ds", j.updateEvery)
	defer func() { j.Info("stopped") }()

LOOP:
	for {
		select {
		case <-j.stop:
			break LOOP
		case t := <-j.tick:
			if t%(j.updateEvery+j.penalty()) == 0 {
				j.runOnce()
			}
		}
	}
	j.module.Cleanup()
	j.cleanup()
	j.stop <- struct{}{} // handshake: unblock the Stop caller
}

// Stop stops job main loop. It blocks until the job is stopped: the send
// makes Start's loop exit, and the receive waits for its cleanup to finish.
func (j *Job) Stop() {
	// TODO: should have blocking and non blocking stop
	j.stop <- struct{}{}
	<-j.stop
}
+
// disableAutoDetection turns off autodetection retries for this job.
func (j *Job) disableAutoDetection() {
	j.AutoDetectEvery = 0
}

// cleanup releases job resources on shutdown: it unregisters the logger,
// drops buffered output, marks every chart this job created as removed
// (emitting obsolete definitions), and flushes the result to the shared
// output under writeLock.
func (j *Job) cleanup() {
	if j.Logger != nil {
		logger.GlobalMsgCountWatcher.Unregister(j.Logger)
	}
	j.buf.Reset()

	if j.runChart.created {
		j.runChart.MarkRemove()
		j.createChart(j.runChart)
	}
	if j.charts != nil {
		for _, chart := range *j.charts {
			if chart.created {
				chart.MarkRemove()
				j.createChart(chart)
			}
		}
	}
	writeLock.Lock()
	_, _ = io.Copy(j.out, j.buf)
	writeLock.Unlock()
}
+
// init initializes the module at most once: it attaches a rate-limited
// logger to both the job and the module, then calls module.Init.
// Subsequent calls return the cached result when init already succeeded.
func (j *Job) init() bool {
	if j.initialized {
		return true
	}

	log := logger.NewLimited(j.ModuleName(), j.Name())
	j.Logger = log
	j.module.GetBase().Logger = log

	j.initialized = j.module.Init()
	return j.initialized
}

// check calls module.Check. A failed check consumes one autodetection
// attempt, unless attempts are unlimited (infTries).
func (j *Job) check() bool {
	ok := j.module.Check()
	if !ok && j.AutoDetectTries != infTries {
		j.AutoDetectTries--
	}
	return ok
}

// postCheck fetches and validates the module's chart definitions; the job
// cannot run without valid charts.
func (j *Job) postCheck() bool {
	if j.charts = j.module.Charts(); j.charts == nil {
		j.Error("nil charts")
		return false
	}
	if err := checkCharts(*j.charts...); err != nil {
		j.Errorf("charts check: %v", err)
		return false
	}
	return true
}
+
// runOnce performs a single collection cycle: collect metrics from the
// module, render chart updates into buf, then flush buf to the shared
// output under writeLock. A panicked collection aborts the cycle without
// writing anything.
func (j *Job) runOnce() {
	curTime := time.Now()
	sinceLastRun := calcSinceLastRun(curTime, j.prevRun)
	j.prevRun = curTime

	metrics := j.collect()

	if j.panicked {
		return
	}

	// retries drives penalty(): reset on a successful update, grow otherwise
	if j.processMetrics(metrics, curTime, sinceLastRun) {
		j.retries = 0
	} else {
		j.retries++
	}

	writeLock.Lock()
	_, _ = io.Copy(j.out, j.buf)
	writeLock.Unlock()
	j.buf.Reset()
}

// collect calls module.Collect, converting a panic into a nil result and
// recording it in the panicked flag.
func (j *Job) collect() (result map[string]int64) {
	j.panicked = false
	defer func() {
		if r := recover(); r != nil {
			j.Errorf("PANIC: %v", r)
			j.panicked = true
		}
	}()
	return j.module.Collect()
}
+
// processMetrics renders updates for all charts into buf. It lazily
// creates the job's runtime chart, emits definitions for charts not yet
// created (marking those whose type.id exceeds RRD_ID_LENGTH_MAX as
// ignored), compacts away charts marked for removal, and updates the rest
// with the collected values. Returns true if at least one chart was
// updated; only then is the run's execution time reported.
func (j *Job) processMetrics(metrics map[string]int64, startTime time.Time, sinceLastRun int) bool {
	if !j.runChart.created {
		j.runChart.ID = fmt.Sprintf("execution_time_of_%s", j.FullName())
		j.runChart.Title = fmt.Sprintf("Execution Time for %s", j.FullName())
		j.createChart(j.runChart)
	}

	elapsed := int64(durationTo(time.Since(startTime), time.Millisecond))

	// in-place filter: charts flagged remove are dropped from the slice
	var i, updated int
	for _, chart := range *j.charts {
		if !chart.created {
			typeID := fmt.Sprintf("%s.%s", j.FullName(), chart.ID)
			if len(typeID) >= RRD_ID_LENGTH_MAX {
				j.Warningf("chart 'type.id' length (%d) >= max allowed (%d), the chart is ignored (%s)",
					len(typeID), RRD_ID_LENGTH_MAX, typeID)
				chart.ignore = true
			}
			j.createChart(chart)
		}
		if chart.remove {
			continue
		}
		(*j.charts)[i] = chart
		i++
		if len(metrics) == 0 || chart.Obsolete {
			continue
		}
		if j.updateChart(chart, metrics, sinceLastRun) {
			updated++
		}
	}
	*j.charts = (*j.charts)[:i]

	if updated == 0 {
		return false
	}
	j.updateChart(j.runChart, map[string]int64{"time": elapsed}, sinceLastRun)
	return true
}
+
// createChart emits the CHART, DIMENSION and VARIABLE definition lines
// for chart into buf and marks it created. Charts flagged ignore are only
// marked, nothing is emitted. A chart without an explicit Priority gets
// the job's next sequential priority.
func (j *Job) createChart(chart *Chart) {
	defer func() { chart.created = true }()
	if chart.ignore {
		return
	}

	if chart.Priority == 0 {
		chart.Priority = j.priority
		j.priority++
	}
	_ = j.api.CHART(
		firstNotEmpty(chart.typeID, j.FullName()),
		chart.ID,
		chart.OverID,
		chart.Title,
		chart.Units,
		chart.Fam,
		chart.Ctx,
		chart.Type.String(),
		chart.Priority,
		j.updateEvery,
		chart.Opts.String(),
		j.pluginName,
		j.moduleName,
	)
	for _, dim := range chart.Dims {
		_ = j.api.DIMENSION(
			dim.ID,
			dim.Name,
			dim.Algo.String(),
			handleZero(dim.Mul),
			handleZero(dim.Div),
			dim.DimOpts.String(),
		)
	}
	for _, v := range chart.Vars {
		_ = j.api.VARIABLE(
			v.ID,
			v.Value,
		)
	}
	_ = j.api.EMPTYLINE()
}
+
// updateChart emits a BEGIN/SET.../END block for chart using the collected
// values; dimensions missing from collected get SETEMPTY. Dimensions
// marked for removal are pruned from the chart in place. Returns true if
// at least one dimension received a value; the chart's Retries counter
// tracks consecutive runs without any.
func (j *Job) updateChart(chart *Chart, collected map[string]int64, sinceLastRun int) bool {
	// ignored charts are never sent to netdata: just prune removed dims
	if chart.ignore {
		dims := chart.Dims[:0]
		for _, dim := range chart.Dims {
			if !dim.remove {
				dims = append(dims, dim)
			}
		}
		chart.Dims = dims
		return false
	}

	// a chart's very first update must not report a time delta
	if !chart.updated {
		sinceLastRun = 0
	}

	_ = j.api.BEGIN(
		firstNotEmpty(chart.typeID, j.FullName()),
		chart.ID,
		sinceLastRun,
	)
	// in-place filter: dims flagged remove are dropped while emitting SETs
	var i, updated int
	for _, dim := range chart.Dims {
		if dim.remove {
			continue
		}
		chart.Dims[i] = dim
		i++
		if v, ok := collected[dim.ID]; !ok {
			_ = j.api.SETEMPTY(dim.ID)
		} else {
			_ = j.api.SET(dim.ID, v)
			updated++
		}
	}
	chart.Dims = chart.Dims[:i]

	for _, vr := range chart.Vars {
		if v, ok := collected[vr.ID]; ok {
			_ = j.api.VARIABLE(vr.ID, v)
		}

	}
	_ = j.api.END()

	if chart.updated = updated > 0; chart.updated {
		chart.Retries = 0
	} else {
		chart.Retries++
	}
	return chart.updated
}
+
+func (j Job) penalty() int {
+ v := j.retries / penaltyStep * penaltyStep * j.updateEvery / 2
+ if v > maxPenalty {
+ return maxPenalty
+ }
+ return v
+}
+
+func calcSinceLastRun(curTime, prevRun time.Time) int {
+ if prevRun.IsZero() {
+ return 0
+ }
+ return int((curTime.UnixNano() - prevRun.UnixNano()) / 1000)
+}
+
+func durationTo(duration time.Duration, to time.Duration) int {
+ return int(int64(duration) / (int64(to) / int64(time.Nanosecond)))
+}
+
// firstNotEmpty returns the first non-empty string among values, or ""
// when every value is empty (or none are given).
func firstNotEmpty(values ...string) string {
	for _, v := range values {
		if v == "" {
			continue
		}
		return v
	}
	return ""
}
+
// handleZero substitutes 1 for a zero value so the generated DIMENSION
// line never carries a zero multiplier or divisor.
func handleZero(v int) int {
	if v != 0 {
		return v
	}
	return 1
}
diff --git a/agent/module/job_test.go b/agent/module/job_test.go
new file mode 100644
index 0000000..207982f
--- /dev/null
+++ b/agent/module/job_test.go
@@ -0,0 +1,288 @@
+package module
+
+import (
+ "fmt"
+ "io/ioutil"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+const (
+ pluginName = "plugin"
+ modName = "module"
+ jobName = "job"
+)
+
// newTestJob builds a Job with fixed test names, no module and discarded
// output; tests assign job.module themselves as needed.
func newTestJob() *Job {
	return NewJob(
		JobConfig{
			PluginName:      pluginName,
			Name:            jobName,
			ModuleName:      modName,
			FullName:        modName + "_" + jobName,
			Module:          nil,
			Out:             ioutil.Discard,
			UpdateEvery:     0,
			AutoDetectEvery: 0,
			Priority:        0,
		},
	)
}
+
+func TestNewJob(t *testing.T) {
+ assert.IsType(t, (*Job)(nil), newTestJob())
+}
+
+func TestJob_FullName(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.FullName(), fmt.Sprintf("%s_%s", modName, jobName))
+}
+
+func TestJob_ModuleName(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.ModuleName(), modName)
+}
+
+func TestJob_Name(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.Name(), jobName)
+}
+
+func TestJob_Panicked(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.Panicked(), job.panicked)
+ job.panicked = true
+ assert.Equal(t, job.Panicked(), job.panicked)
+}
+
+func TestJob_AutoDetectionEvery(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.AutoDetectionEvery(), job.AutoDetectEvery)
+}
+
+func TestJob_RetryAutoDetection(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() bool {
+ return true
+ },
+ CheckFunc: func() bool { return false },
+ ChartsFunc: func() *Charts {
+ return &Charts{}
+ },
+ }
+ job.module = m
+ job.AutoDetectEvery = 1
+
+ assert.True(t, job.RetryAutoDetection())
+ assert.Equal(t, infTries, job.AutoDetectTries)
+ for i := 0; i < 1000; i++ {
+ job.check()
+ }
+ assert.True(t, job.RetryAutoDetection())
+ assert.Equal(t, infTries, job.AutoDetectTries)
+
+ job.AutoDetectTries = 10
+ for i := 0; i < 10; i++ {
+ job.check()
+ }
+ assert.False(t, job.RetryAutoDetection())
+ assert.Equal(t, 0, job.AutoDetectTries)
+}
+
+func TestJob_AutoDetection(t *testing.T) {
+ job := newTestJob()
+ var v int
+ m := &MockModule{
+ InitFunc: func() bool {
+ v++
+ return true
+ },
+ CheckFunc: func() bool {
+ v++
+ return true
+ },
+ ChartsFunc: func() *Charts {
+ v++
+ return &Charts{}
+ },
+ }
+ job.module = m
+
+ assert.True(t, job.AutoDetection())
+ assert.Equal(t, 3, v)
+}
+
+func TestJob_AutoDetection_FailInit(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() bool {
+ return false
+ },
+ }
+ job.module = m
+
+ assert.False(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_FailCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() bool {
+ return true
+ },
+ CheckFunc: func() bool {
+ return false
+ },
+ }
+ job.module = m
+
+ assert.False(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_FailPostCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() bool {
+ return true
+ },
+ CheckFunc: func() bool {
+ return true
+ },
+ ChartsFunc: func() *Charts {
+ return nil
+ },
+ }
+ job.module = m
+
+ assert.False(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicInit(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() bool {
+ panic("panic in Init")
+ },
+ }
+ job.module = m
+
+ assert.False(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() bool {
+ return true
+ },
+ CheckFunc: func() bool {
+ panic("panic in Check")
+ },
+ }
+ job.module = m
+
+ assert.False(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicPostCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() bool {
+ return true
+ },
+ CheckFunc: func() bool {
+ return true
+ },
+ ChartsFunc: func() *Charts {
+ panic("panic in PostCheck")
+ },
+ }
+ job.module = m
+
+ assert.False(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_Start(t *testing.T) {
+ m := &MockModule{
+ ChartsFunc: func() *Charts {
+ return &Charts{
+ &Chart{
+ ID: "id",
+ Title: "title",
+ Units: "units",
+ Dims: Dims{
+ {ID: "id1"},
+ {ID: "id2"},
+ },
+ },
+ }
+ },
+ CollectFunc: func() map[string]int64 {
+ return map[string]int64{
+ "id1": 1,
+ "id2": 2,
+ }
+ },
+ }
+ job := newTestJob()
+ job.module = m
+ job.charts = job.module.Charts()
+ job.updateEvery = 1
+
+ go func() {
+ for i := 1; i < 3; i++ {
+ job.Tick(i)
+ time.Sleep(time.Second)
+ }
+ job.Stop()
+ }()
+
+ job.Start()
+
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_MainLoop_Panic(t *testing.T) {
+ m := &MockModule{
+ CollectFunc: func() map[string]int64 {
+ panic("panic in Collect")
+ },
+ }
+ job := newTestJob()
+ job.module = m
+ job.updateEvery = 1
+
+ go func() {
+ for i := 1; i < 3; i++ {
+ time.Sleep(time.Second)
+ job.Tick(i)
+ }
+ job.Stop()
+ }()
+
+ job.Start()
+
+ assert.True(t, job.Panicked())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_Tick(t *testing.T) {
+ job := newTestJob()
+ for i := 0; i < 3; i++ {
+ job.Tick(i)
+ }
+}
diff --git a/agent/module/mock.go b/agent/module/mock.go
new file mode 100644
index 0000000..d5c157d
--- /dev/null
+++ b/agent/module/mock.go
@@ -0,0 +1,53 @@
+package module
+
// MockModule is a configurable Module implementation for tests: each
// interface method delegates to the corresponding XxxFunc field when set,
// falling back to a benign default otherwise.
type MockModule struct {
	Base

	InitFunc    func() bool
	CheckFunc   func() bool
	ChartsFunc  func() *Charts
	CollectFunc func() map[string]int64
	CleanupFunc func()
	// CleanupDone records whether Cleanup has been called.
	CleanupDone bool
}
+
// Init invokes InitFunc, defaulting to success when it is unset.
func (m MockModule) Init() bool {
	if m.InitFunc == nil {
		return true
	}
	return m.InitFunc()
}

// Check invokes CheckFunc, defaulting to success when it is unset.
func (m MockModule) Check() bool {
	if m.CheckFunc == nil {
		return true
	}
	return m.CheckFunc()
}

// Charts invokes ChartsFunc, returning nil charts when it is unset.
func (m MockModule) Charts() *Charts {
	if m.ChartsFunc == nil {
		return nil
	}
	return m.ChartsFunc()
}

// Collect invokes CollectFunc, returning nil metrics when it is unset.
func (m MockModule) Collect() map[string]int64 {
	if m.CollectFunc == nil {
		return nil
	}
	return m.CollectFunc()
}
+
// Cleanup invokes CleanupFunc (when set) and records the call by setting
// CleanupDone to true. Pointer receiver: the flag must persist.
func (m *MockModule) Cleanup() {
	if m.CleanupFunc != nil {
		m.CleanupFunc()
	}
	m.CleanupDone = true
}
diff --git a/agent/module/mock_test.go b/agent/module/mock_test.go
new file mode 100644
index 0000000..f06656d
--- /dev/null
+++ b/agent/module/mock_test.go
@@ -0,0 +1,52 @@
+package module
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMockModule_Init(t *testing.T) {
+ m := &MockModule{}
+
+ assert.True(t, m.Init())
+ m.InitFunc = func() bool { return false }
+ assert.False(t, m.Init())
+}
+
+func TestMockModule_Check(t *testing.T) {
+ m := &MockModule{}
+
+ assert.True(t, m.Check())
+ m.CheckFunc = func() bool { return false }
+ assert.False(t, m.Check())
+}
+
+func TestMockModule_Charts(t *testing.T) {
+ m := &MockModule{}
+ c := &Charts{}
+
+ assert.Nil(t, m.Charts())
+ m.ChartsFunc = func() *Charts { return c }
+ assert.True(t, c == m.Charts())
+}
+
+func TestMockModule_Collect(t *testing.T) {
+ m := &MockModule{}
+ d := map[string]int64{
+ "1": 1,
+ }
+
+ assert.Nil(t, m.Collect())
+ m.CollectFunc = func() map[string]int64 { return d }
+ assert.Equal(t, d, m.Collect())
+}
+
+func TestMockModule_Cleanup(t *testing.T) {
+ m := &MockModule{}
+ require.False(t, m.CleanupDone)
+
+ m.Cleanup()
+ assert.True(t, m.CleanupDone)
+}
diff --git a/agent/module/module.go b/agent/module/module.go
new file mode 100644
index 0000000..3fbec0d
--- /dev/null
+++ b/agent/module/module.go
@@ -0,0 +1,35 @@
+package module
+
+import (
+ "github.com/netdata/go.d.plugin/logger"
+)
+
// Module is an interface that represents a module.
type Module interface {
	// Init does initialization.
	// If it returns false, the job will be disabled.
	Init() bool

	// Check is called after Init.
	// If it returns false, the job will be disabled.
	Check() bool

	// Charts returns the chart definition.
	// Make sure not to share the returned instance.
	Charts() *Charts

	// Collect collects metrics.
	Collect() map[string]int64

	// Cleanup releases module resources; the job calls it when it stops
	// or when autodetection fails.
	Cleanup()

	// GetBase returns the embedded Base, giving the job access to the
	// module's logger.
	GetBase() *Base
}

// Base is a helper struct. All modules should embed this struct.
type Base struct {
	*logger.Logger
}

// GetBase implements Module.
func (b *Base) GetBase() *Base { return b }
diff --git a/agent/module/registry.go b/agent/module/registry.go
new file mode 100644
index 0000000..d1ac9cb
--- /dev/null
+++ b/agent/module/registry.go
@@ -0,0 +1,43 @@
+package module
+
+import "fmt"
+
// Module defaults, used when a job config does not override them.
const (
	UpdateEvery        = 1     // data collection interval
	AutoDetectionRetry = 0     // autodetection retry interval; 0 disables retries
	Priority           = 70000 // base chart priority
)

// Defaults is a set of module default parameters.
type Defaults struct {
	UpdateEvery        int
	AutoDetectionRetry int
	Priority           int
	Disabled           bool // if true, the module must be explicitly enabled in the config
}

type (
	// Creator is a Job builder.
	Creator struct {
		Defaults
		Create func() Module
	}
	// Registry is a collection of Creators.
	Registry map[string]Creator
)
+
// DefaultRegistry is the global registry used by the package-level
// Register function.
var DefaultRegistry = Registry{}

// Register registers a module in the DefaultRegistry.
func Register(name string, creator Creator) {
	DefaultRegistry.Register(name, creator)
}

// Register registers a module. It panics if a module with the same name
// is already registered (a programming error, caught at startup).
func (r Registry) Register(name string, creator Creator) {
	if _, ok := r[name]; ok {
		panic(fmt.Sprintf("%s is already in registry", name))
	}
	r[name] = creator
}
diff --git a/agent/module/registry_test.go b/agent/module/registry_test.go
new file mode 100644
index 0000000..1d8633d
--- /dev/null
+++ b/agent/module/registry_test.go
@@ -0,0 +1,32 @@
+package module
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRegister(t *testing.T) {
+ modName := "modName"
+ registry := make(Registry)
+
+ // OK case
+ assert.NotPanics(
+ t,
+ func() {
+ registry.Register(modName, Creator{})
+ })
+
+ _, exist := registry[modName]
+
+ require.True(t, exist)
+
+ // Panic case
+ assert.Panics(
+ t,
+ func() {
+ registry.Register(modName, Creator{})
+ })
+
+}
diff --git a/agent/netdataapi/api.go b/agent/netdataapi/api.go
new file mode 100644
index 0000000..6a927f6
--- /dev/null
+++ b/agent/netdataapi/api.go
@@ -0,0 +1,101 @@
+package netdataapi
+
+import (
+ "fmt"
+ "io"
+)
+
+type (
+ // API implements Netdata external plugins API.
+ // https://learn.netdata.cloud/docs/agent/collectors/plugins.d#the-output-of-the-plugin
+ API struct {
+ io.Writer
+ }
+)
+
+func New(w io.Writer) *API { return &API{w} }
+
+// CHART create or update a chart.
+func (a *API) CHART(
+ typeID string,
+ ID string,
+ name string,
+ title string,
+ units string,
+ family string,
+ context string,
+ chartType string,
+ priority int,
+ updateEvery int,
+ options string,
+ plugin string,
+ module string) error {
+ _, err := fmt.Fprintf(a, "CHART '%s.%s' '%s' '%s' '%s' '%s' '%s' '%s' '%d' '%d' '%s' '%s' '%s'\n",
+ typeID, ID, name, title, units, family, context, chartType, priority, updateEvery, options, plugin, module)
+ return err
+}
+
+// DIMENSION add or update a dimension to the chart just created.
+func (a *API) DIMENSION(
+ ID string,
+ name string,
+ algorithm string,
+ multiplier int,
+ divisor int,
+ options string) error {
+ _, err := fmt.Fprintf(a, "DIMENSION '%s' '%s' '%s' '%d' '%d' '%s'\n",
+ ID, name, algorithm, multiplier, divisor, options)
+ return err
+}
+
+// BEGIN initialize data collection for a chart.
+func (a *API) BEGIN(typeID string, ID string, msSince int) (err error) {
+ if msSince > 0 {
+ _, err = fmt.Fprintf(a, "BEGIN '%s.%s' %d\n", typeID, ID, msSince)
+ } else {
+ _, err = fmt.Fprintf(a, "BEGIN '%s.%s'\n", typeID, ID)
+ }
+ return err
+}
+
+// SET set the value of a dimension for the initialized chart.
+func (a *API) SET(ID string, value int64) error {
+ _, err := fmt.Fprintf(a, "SET '%s' = %d\n", ID, value)
+ return err
+}
+
+// SETEMPTY set the empty value of a dimension for the initialized chart.
+func (a *API) SETEMPTY(ID string) error {
+ _, err := fmt.Fprintf(a, "SET '%s' = \n", ID)
+ return err
+}
+
+// VARIABLE set the value of a CHART scope variable for the initialized chart.
+func (a *API) VARIABLE(ID string, value int64) error {
+ _, err := fmt.Fprintf(a, "VARIABLE CHART '%s' = %d\n", ID, value)
+ return err
+}
+
+// END complete data collection for the initialized chart.
+func (a *API) END() error {
+ _, err := fmt.Fprintf(a, "END\n\n")
+ return err
+}
+
+// FLUSH ignore the last collected values.
+func (a *API) FLUSH() error {
+ _, err := fmt.Fprintf(a, "FLUSH\n")
+ return err
+}
+
+// DISABLE disable this plugin. This will prevent Netdata from restarting the plugin.
+func (a *API) DISABLE() error {
+ _, err := fmt.Fprintf(a, "DISABLE\n")
+ return err
+}
+
+// EMPTYLINE write an empty line.
+func (a *API) EMPTYLINE() error {
+ _, err := fmt.Fprintf(a, "\n")
+ return err
+}
diff --git a/agent/netdataapi/api_test.go b/agent/netdataapi/api_test.go
new file mode 100644
index 0000000..445ef82
--- /dev/null
+++ b/agent/netdataapi/api_test.go
@@ -0,0 +1,151 @@
+package netdataapi
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAPI_CHART(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.CHART(
+ "",
+ "id",
+ "name",
+ "title",
+ "units",
+ "family",
+ "context",
+ "line",
+ 1,
+ 1,
+ "",
+ "orchestrator",
+ "module",
+ )
+
+ assert.Equal(
+ t,
+ "CHART '.id' 'name' 'title' 'units' 'family' 'context' 'line' '1' '1' '' 'orchestrator' 'module'\n",
+ b.String(),
+ )
+}
+
+func TestAPI_DIMENSION(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.DIMENSION(
+ "id",
+ "name",
+ "absolute",
+ 1,
+ 1,
+ "",
+ )
+
+ assert.Equal(
+ t,
+ "DIMENSION 'id' 'name' 'absolute' '1' '1' ''\n",
+ b.String(),
+ )
+}
+
+func TestAPI_BEGIN(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.BEGIN(
+ "typeID",
+ "id",
+ 0,
+ )
+
+ assert.Equal(
+ t,
+ "BEGIN 'typeID.id'\n",
+ b.String(),
+ )
+
+ b.Reset()
+
+ _ = netdataAPI.BEGIN(
+ "typeID",
+ "id",
+ 1,
+ )
+
+ assert.Equal(
+ t,
+ "BEGIN 'typeID.id' 1\n",
+ b.String(),
+ )
+}
+
+func TestAPI_SET(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.SET("id", 100)
+
+ assert.Equal(
+ t,
+ "SET 'id' = 100\n",
+ b.String(),
+ )
+}
+
+func TestAPI_SETEMPTY(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.SETEMPTY("id")
+
+ assert.Equal(
+ t,
+ "SET 'id' = \n",
+ b.String(),
+ )
+}
+
+func TestAPI_VARIABLE(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.VARIABLE("id", 100)
+
+ assert.Equal(
+ t,
+ "VARIABLE CHART 'id' = 100\n",
+ b.String(),
+ )
+}
+
+func TestAPI_END(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.END()
+
+ assert.Equal(
+ t,
+ "END\n\n",
+ b.String(),
+ )
+}
+
+func TestAPI_FLUSH(t *testing.T) {
+ b := &bytes.Buffer{}
+ netdataAPI := API{Writer: b}
+
+ _ = netdataAPI.FLUSH()
+
+ assert.Equal(
+ t,
+ "FLUSH\n",
+ b.String(),
+ )
+}
diff --git a/agent/setup.go b/agent/setup.go
new file mode 100644
index 0000000..541940b
--- /dev/null
+++ b/agent/setup.go
@@ -0,0 +1,201 @@
+package agent
+
import (
	"errors"
	"fmt"
	"io"
	"os"

	"github.com/netdata/go.d.plugin/agent/job/confgroup"
	"github.com/netdata/go.d.plugin/agent/job/discovery"
	"github.com/netdata/go.d.plugin/agent/job/discovery/dummy"
	"github.com/netdata/go.d.plugin/agent/job/discovery/file"
	"github.com/netdata/go.d.plugin/agent/module"

	"gopkg.in/yaml.v2"
)
+
+func defaultConfig() config {
+ return config{
+ Enabled: true,
+ DefaultRun: true,
+ MaxProcs: 0,
+ Modules: nil,
+ }
+}
+
// config is the plugin-level configuration loaded from "<plugin name>.conf".
type config struct {
	Enabled    bool            `yaml:"enabled"`     // enable/disable the whole plugin
	DefaultRun bool            `yaml:"default_run"` // default state for modules not listed in Modules
	MaxProcs   int             `yaml:"max_procs"`   // maximum number of used CPUs; zero means no limit
	Modules    map[string]bool `yaml:"modules"`     // per-module enable/disable overrides
}
+
+func (c config) String() string {
+ return fmt.Sprintf("enabled '%v', default_run '%v', max_procs '%d'",
+ c.Enabled, c.DefaultRun, c.MaxProcs)
+}
+
+func (a *Agent) loadPluginConfig() config {
+ a.Info("loading config file")
+
+ if len(a.ConfDir) == 0 {
+ a.Info("config dir not provided, will use defaults")
+ return defaultConfig()
+ }
+
+ cfgPath := a.Name + ".conf"
+ a.Infof("looking for '%s' in %v", cfgPath, a.ConfDir)
+
+ path, err := a.ConfDir.Find(cfgPath)
+ if err != nil || path == "" {
+ a.Warning("couldn't find config, will use defaults")
+ return defaultConfig()
+ }
+ a.Infof("found '%s", path)
+
+ cfg := defaultConfig()
+ if err := loadYAML(&cfg, path); err != nil {
+ a.Warningf("couldn't load config '%s': %v, will use defaults", path, err)
+ return defaultConfig()
+ }
+ a.Info("config successfully loaded")
+ return cfg
+}
+
+func (a *Agent) loadEnabledModules(cfg config) module.Registry {
+ a.Info("loading modules")
+
+ all := a.RunModule == "all" || a.RunModule == ""
+ enabled := module.Registry{}
+
+ for name, creator := range a.ModuleRegistry {
+ if !all && a.RunModule != name {
+ continue
+ }
+ if all && creator.Disabled && !cfg.isExplicitlyEnabled(name) {
+ a.Infof("'%s' module disabled by default, should be explicitly enabled in the config", name)
+ continue
+ }
+ if all && !cfg.isImplicitlyEnabled(name) {
+ a.Infof("'%s' module disabled in the config file", name)
+ continue
+ }
+ enabled[name] = creator
+ }
+ a.Infof("enabled/registered modules: %d/%d", len(enabled), len(a.ModuleRegistry))
+ return enabled
+}
+
+func (a *Agent) buildDiscoveryConf(enabled module.Registry) discovery.Config {
+ a.Info("building discovery config")
+
+ reg := confgroup.Registry{}
+ for name, creator := range enabled {
+ reg.Register(name, confgroup.Default{
+ MinUpdateEvery: a.MinUpdateEvery,
+ UpdateEvery: creator.UpdateEvery,
+ AutoDetectionRetry: creator.AutoDetectionRetry,
+ Priority: creator.Priority,
+ })
+ }
+
+ var readPaths, dummyPaths []string
+
+ if len(a.ModulesConfDir) == 0 {
+ a.Info("modules conf dir not provided, will use default config for all enabled modules")
+ for name := range enabled {
+ dummyPaths = append(dummyPaths, name)
+ }
+ return discovery.Config{
+ Registry: reg,
+ Dummy: dummy.Config{Names: dummyPaths}}
+ }
+
+ for name := range enabled {
+ cfgPath := name + ".conf"
+ a.Infof("looking for '%s' in %v", cfgPath, a.ModulesConfDir)
+
+ path, err := a.ModulesConfDir.Find(cfgPath)
+ if err != nil {
+ a.Infof("couldn't find '%s' module config, will use default config", name)
+ dummyPaths = append(dummyPaths, name)
+ } else {
+ a.Infof("found '%s", path)
+ readPaths = append(readPaths, path)
+ }
+ }
+
+ a.Infof("dummy/read/watch paths: %d/%d/%d", len(dummyPaths), len(readPaths), len(a.ModulesSDConfPath))
+ return discovery.Config{
+ Registry: reg,
+ File: file.Config{
+ Read: readPaths,
+ Watch: a.ModulesSDConfPath,
+ },
+ Dummy: dummy.Config{
+ Names: dummyPaths,
+ },
+ }
+}
+
+func (c config) isExplicitlyEnabled(moduleName string) bool {
+ return c.isEnabled(moduleName, true)
+}
+
+func (c config) isImplicitlyEnabled(moduleName string) bool {
+ return c.isEnabled(moduleName, false)
+}
+
+func (c config) isEnabled(moduleName string, explicit bool) bool {
+ if enabled, ok := c.Modules[moduleName]; ok {
+ return enabled
+ }
+ if explicit {
+ return false
+ }
+ return c.DefaultRun
+}
+
// UnmarshalYAML implements yaml.Unmarshaler.
// Besides the declared fields, any unknown top-level key with a boolean
// value is treated as a module enable/disable toggle. This keeps a config
// with a mis-indented 'modules' section (module names at the top level)
// working — see the "broken modules section" case in the tests.
func (c *config) UnmarshalYAML(unmarshal func(interface{}) error) error {
	// Decode into an alias type first: it has the same fields but no
	// UnmarshalYAML method, so this does not recurse.
	type plain config
	if err := unmarshal((*plain)(c)); err != nil {
		return err
	}

	// Decode again generically to see keys not declared on config.
	var m map[string]interface{}
	if err := unmarshal(&m); err != nil {
		return err
	}

	for key, value := range m {
		switch key {
		case "enabled", "default_run", "max_procs", "modules":
			continue
		}
		// Only boolean-convertible values become module toggles; anything
		// else is silently ignored.
		var b bool
		if in, err := yaml.Marshal(value); err != nil || yaml.Unmarshal(in, &b) != nil {
			continue
		}
		if c.Modules == nil {
			c.Modules = make(map[string]bool)
		}
		c.Modules[key] = b
	}
	return nil
}
+
+func loadYAML(conf interface{}, path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ if err = yaml.NewDecoder(f).Decode(conf); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
diff --git a/agent/setup_test.go b/agent/setup_test.go
new file mode 100644
index 0000000..11d1551
--- /dev/null
+++ b/agent/setup_test.go
@@ -0,0 +1,207 @@
+package agent
+
+import (
+ "testing"
+
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+func TestConfig_UnmarshalYAML(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantCfg config
+ }{
+ "valid configuration": {
+ input: "enabled: yes\ndefault_run: yes\nmodules:\n module1: yes\n module2: yes",
+ wantCfg: config{
+ Enabled: true,
+ DefaultRun: true,
+ Modules: map[string]bool{
+ "module1": true,
+ "module2": true,
+ },
+ },
+ },
+ "valid configuration with broken modules section": {
+ input: "enabled: yes\ndefault_run: yes\nmodules:\nmodule1: yes\nmodule2: yes",
+ wantCfg: config{
+ Enabled: true,
+ DefaultRun: true,
+ Modules: map[string]bool{
+ "module1": true,
+ "module2": true,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ var cfg config
+ err := yaml.Unmarshal([]byte(test.input), &cfg)
+ require.NoError(t, err)
+ assert.Equal(t, test.wantCfg, cfg)
+ })
+ }
+}
+
+func TestAgent_loadConfig(t *testing.T) {
+ tests := map[string]struct {
+ agent Agent
+ wantCfg config
+ }{
+ "valid config file": {
+ agent: Agent{
+ Name: "agent-valid",
+ ConfDir: []string{"testdata"},
+ },
+ wantCfg: config{
+ Enabled: true,
+ DefaultRun: true,
+ MaxProcs: 1,
+ Modules: map[string]bool{
+ "module1": true,
+ "module2": true,
+ },
+ },
+ },
+ "no config path provided": {
+ agent: Agent{},
+ wantCfg: defaultConfig(),
+ },
+ "config file not found": {
+ agent: Agent{
+ Name: "agent",
+ ConfDir: []string{"testdata/not-exist"},
+ },
+ wantCfg: defaultConfig(),
+ },
+ "empty config file": {
+ agent: Agent{
+ Name: "agent-empty",
+ ConfDir: []string{"testdata"},
+ },
+ wantCfg: defaultConfig(),
+ },
+ "invalid syntax config file": {
+ agent: Agent{
+ Name: "agent-invalid-syntax",
+ ConfDir: []string{"testdata"},
+ },
+ wantCfg: defaultConfig(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.wantCfg, test.agent.loadPluginConfig())
+ })
+ }
+}
+
// TestAgent_loadEnabledModules verifies module filtering: RunModule selects a
// single module ("all"/"" selects everything), Creator.Disabled requires an
// explicit config override, and config.DefaultRun decides the fate of modules
// absent from the config.
func TestAgent_loadEnabledModules(t *testing.T) {
	tests := map[string]struct {
		agent       Agent
		cfg         config
		wantModules module.Registry
	}{
		"load all, module disabled by default but explicitly enabled": {
			agent: Agent{
				ModuleRegistry: module.Registry{
					"module1": module.Creator{Defaults: module.Defaults{Disabled: true}},
				},
			},
			cfg: config{
				Modules: map[string]bool{"module1": true},
			},
			wantModules: module.Registry{
				"module1": module.Creator{Defaults: module.Defaults{Disabled: true}},
			},
		},
		"load all, module disabled by default and not explicitly enabled": {
			agent: Agent{
				ModuleRegistry: module.Registry{
					"module1": module.Creator{Defaults: module.Defaults{Disabled: true}},
				},
			},
			wantModules: module.Registry{},
		},
		"load all, module in config modules (default_run=true)": {
			agent: Agent{
				ModuleRegistry: module.Registry{
					"module1": module.Creator{},
				},
			},
			cfg: config{
				Modules:    map[string]bool{"module1": true},
				DefaultRun: true,
			},
			wantModules: module.Registry{
				"module1": module.Creator{},
			},
		},
		"load all, module not in config modules (default_run=true)": {
			agent: Agent{
				ModuleRegistry: module.Registry{"module1": module.Creator{}},
			},
			cfg: config{
				DefaultRun: true,
			},
			wantModules: module.Registry{"module1": module.Creator{}},
		},
		"load all, module in config modules (default_run=false)": {
			agent: Agent{
				ModuleRegistry: module.Registry{
					"module1": module.Creator{},
				},
			},
			cfg: config{
				Modules: map[string]bool{"module1": true},
			},
			wantModules: module.Registry{
				"module1": module.Creator{},
			},
		},
		"load all, module not in config modules (default_run=false)": {
			agent: Agent{
				ModuleRegistry: module.Registry{
					"module1": module.Creator{},
				},
			},
			wantModules: module.Registry{},
		},
		"load specific, module exist in registry": {
			agent: Agent{
				RunModule: "module1",
				ModuleRegistry: module.Registry{
					"module1": module.Creator{},
				},
			},
			wantModules: module.Registry{
				"module1": module.Creator{},
			},
		},
		"load specific, module doesnt exist in registry": {
			agent: Agent{
				RunModule:      "module3",
				ModuleRegistry: module.Registry{},
			},
			wantModules: module.Registry{},
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			assert.Equal(t, test.wantModules, test.agent.loadEnabledModules(test.cfg))
		})
	}
}
+
// TODO: tech debt
// TestAgent_buildDiscoveryConf is an intentionally empty placeholder:
// buildDiscoveryConf is currently untested and needs module registry and
// discovery-config fixtures to be exercised meaningfully.
func TestAgent_buildDiscoveryConf(t *testing.T) {

}
diff --git a/agent/testdata/agent-empty.conf b/agent/testdata/agent-empty.conf
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/agent/testdata/agent-empty.conf
diff --git a/agent/testdata/agent-invalid-syntax.conf b/agent/testdata/agent-invalid-syntax.conf
new file mode 100644
index 0000000..c4a0b91
--- /dev/null
+++ b/agent/testdata/agent-invalid-syntax.conf
@@ -0,0 +1,7 @@
+- enabled: yes
+default_run: yes
+max_procs: 1
+
+modules:
+ module1: yes
+ module2: yes
diff --git a/agent/testdata/agent-valid.conf b/agent/testdata/agent-valid.conf
new file mode 100644
index 0000000..ec5e1d0
--- /dev/null
+++ b/agent/testdata/agent-valid.conf
@@ -0,0 +1,7 @@
+enabled: yes
+default_run: yes
+max_procs: 1
+
+modules:
+ module1: yes
+ module2: yes
diff --git a/agent/ticker/ticker.go b/agent/ticker/ticker.go
new file mode 100644
index 0000000..88c6074
--- /dev/null
+++ b/agent/ticker/ticker.go
@@ -0,0 +1,53 @@
+package ticker
+
+import "time"
+
type (
	// Ticker holds a channel that delivers ticks of a clock at intervals.
	// Ticks are aligned to interval boundaries (e.g. whole seconds for a
	// one-second interval).
	Ticker struct {
		C        <-chan int    // receives the tick sequence number, starting at 0
		done     chan struct{} // signals the tick goroutine to stop
		loops    int           // ticks delivered so far; owned by the tick goroutine
		interval time.Duration // alignment interval
	}
)
+
+// New returns a new Ticker containing a channel that will send the time with a period specified by the duration argument.
+// It adjusts the intervals or drops ticks to make up for slow receivers.
+// The duration must be greater than zero; if not, New will panic. Stop the Ticker to release associated resources.
+func New(interval time.Duration) *Ticker {
+ ticker := &Ticker{
+ interval: interval,
+ done: make(chan struct{}, 1),
+ }
+ ticker.start()
+ return ticker
+}
+
// start launches the tick goroutine and wires t.C to it.
// The goroutine runs until Stop is called, then closes the channel and exits.
func (t *Ticker) start() {
	ch := make(chan int)
	t.C = ch
	go func() {
	LOOP:
		for {
			now := time.Now()
			// Align the next tick with the interval boundary
			// (e.g. the next whole second for a 1s interval).
			nextRun := now.Truncate(t.interval).Add(t.interval)

			time.Sleep(nextRun.Sub(now))
			select {
			case <-t.done:
				// Stop was called: close the channel so readers unblock.
				close(ch)
				break LOOP
			case ch <- t.loops:
				// Tick delivered; a slow receiver delays this send until
				// it reads or Stop arrives.
				t.loops++
			}
		}
	}()
}
+
// Stop turns off a Ticker. After Stop, no more ticks will be sent.
// Unlike time.Ticker, the tick goroutine closes C once it observes the stop
// signal, so blocked readers are released (the previous comment claiming the
// channel is never closed did not match start's behavior).
func (t *Ticker) Stop() {
	t.done <- struct{}{}
}
diff --git a/agent/ticker/ticket_test.go b/agent/ticker/ticket_test.go
new file mode 100644
index 0000000..f1f3dc6
--- /dev/null
+++ b/agent/ticker/ticket_test.go
@@ -0,0 +1,47 @@
+package ticker
+
+import (
+ "testing"
+ "time"
+)
+
+var allowedDelta = 100 * time.Millisecond
+
// TestTickerParallel runs TestTicker from 100 staggered goroutines to
// exercise many concurrent tickers.
// NOTE(review): the goroutines are not joined; on a slow run they could call
// t.Errorf after the 4s sleep returns — consider a sync.WaitGroup.
func TestTickerParallel(t *testing.T) {
	for i := 0; i < 100; i++ {
		i := i // capture the per-iteration value for the closure (pre-Go 1.22 semantics)
		go func() {
			time.Sleep(time.Second / 100 * time.Duration(i))
			TestTicker(t)
		}()
	}
	time.Sleep(4 * time.Second)
}
+
// TestTicker checks that ticks arrive aligned to second boundaries and
// roughly one second apart, both within allowedDelta.
// NOTE(review): wall-clock assertions like these can flake under heavy load.
func TestTicker(t *testing.T) {
	tk := New(time.Second)
	defer tk.Stop()
	prev := time.Now()
	for i := 0; i < 3; i++ {
		<-tk.C
		now := time.Now()
		// Distance from the nearest whole-second boundary.
		diff := abs(now.Round(time.Second).Sub(now))
		if diff >= allowedDelta {
			t.Errorf("Ticker is not aligned: expect delta < %v but was: %v (%s)", allowedDelta, diff, now.Format(time.RFC3339Nano))
		}
		// The spacing check needs a previous tick, so skip the first one.
		if i > 0 {
			dt := now.Sub(prev)
			if abs(dt-time.Second) >= allowedDelta {
				t.Errorf("Ticker interval: expect delta < %v ns but was: %v", allowedDelta, abs(dt-time.Second))
			}
		}
		prev = now
	}
}
+
+func abs(a time.Duration) time.Duration {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
diff --git a/cli/cli.go b/cli/cli.go
new file mode 100644
index 0000000..90c56fd
--- /dev/null
+++ b/cli/cli.go
@@ -0,0 +1,40 @@
+package cli
+
+import (
+ "strconv"
+
+ "github.com/jessevdk/go-flags"
+)
+
// Option defines command line options.
type Option struct {
	// UpdateEvery has no flag tag on purpose: Parse fills it from the
	// positional "[update every]" argument and defaults it to 1.
	UpdateEvery int
	Module      string   `short:"m" long:"modules" description:"module name to run" default:"all"`
	ConfDir     []string `short:"c" long:"config-dir" description:"config dir to read"`
	WatchPath   []string `short:"w" long:"watch-path" description:"config path to watch"`
	Debug       bool     `short:"d" long:"debug" description:"debug mode"`
	Version     bool     `short:"v" long:"version" description:"display the version and exit"`
}
+
+// Parse returns parsed command-line flags in Option struct
+func Parse(args []string) (*Option, error) {
+ opt := &Option{
+ UpdateEvery: 1,
+ }
+ parser := flags.NewParser(opt, flags.Default)
+ parser.Name = "orchestrator"
+ parser.Usage = "[OPTIONS] [update every]"
+
+ rest, err := parser.ParseArgs(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rest) > 1 {
+ if opt.UpdateEvery, err = strconv.Atoi(rest[1]); err != nil {
+ return nil, err
+ }
+ }
+
+ return opt, nil
+}
diff --git a/cmd/godplugin/main.go b/cmd/godplugin/main.go
new file mode 100644
index 0000000..dbb1391
--- /dev/null
+++ b/cmd/godplugin/main.go
@@ -0,0 +1,129 @@
+package main
+
+import (
+ "fmt"
+ "os"
+ "os/user"
+ "path"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/agent"
+ "github.com/netdata/go.d.plugin/cli"
+ "github.com/netdata/go.d.plugin/logger"
+ "github.com/netdata/go.d.plugin/pkg/multipath"
+
+ "github.com/jessevdk/go-flags"
+
+ _ "github.com/netdata/go.d.plugin/modules"
+)
+
// Paths and settings injected via the netdata environment; they are empty
// when the plugin runs outside netdata and fallbacks are used instead.
var (
	cd, _     = os.Getwd() // working dir, used for the cwd-relative fallback config paths
	name      = "go.d"     // plugin name; also the per-module config subdirectory
	userDir   = os.Getenv("NETDATA_USER_CONFIG_DIR")
	stockDir  = os.Getenv("NETDATA_STOCK_CONFIG_DIR")
	varLibDir = os.Getenv("NETDATA_LIB_DIR")
	lockDir   = os.Getenv("NETDATA_LOCK_DIR")
	watchPath = os.Getenv("NETDATA_PLUGINS_GOD_WATCH_PATH")

	// version defaults to "unknown"; presumably overwritten at build time
	// via -ldflags — TODO confirm against the build scripts.
	version = "unknown"
)
+
+func confDir(opts *cli.Option) multipath.MultiPath {
+ if len(opts.ConfDir) > 0 {
+ return opts.ConfDir
+ }
+ if userDir != "" || stockDir != "" {
+ return multipath.New(
+ userDir,
+ stockDir,
+ )
+ }
+ return multipath.New(
+ path.Join(cd, "/../../../../etc/netdata"),
+ path.Join(cd, "/../../../../usr/lib/netdata/conf.d"),
+ )
+}
+
+func modulesConfDir(opts *cli.Option) (mpath multipath.MultiPath) {
+ if len(opts.ConfDir) > 0 {
+ return opts.ConfDir
+ }
+ if userDir != "" || stockDir != "" {
+ if userDir != "" {
+ mpath = append(mpath, path.Join(userDir, name))
+ }
+ if stockDir != "" {
+ mpath = append(mpath, path.Join(stockDir, name))
+ }
+ return multipath.New(mpath...)
+ }
+ return multipath.New(
+ path.Join(cd, "/../../../../etc/netdata", name),
+ path.Join(cd, "/../../../../usr/lib/netdata/conf.d", name),
+ )
+}
+
+func watchPaths(opts *cli.Option) []string {
+ if watchPath == "" {
+ return opts.WatchPath
+ }
+ return append(opts.WatchPath, watchPath)
+}
+
+func stateFile() string {
+ if varLibDir == "" {
+ return ""
+ }
+ return path.Join(varLibDir, "god-jobs-statuses.json")
+}
+
func init() {
	// A TZ value starting with ":" (a libc extension, e.g. ":/etc/localtime")
	// is reportedly mishandled by Go's time package, so drop it and let the
	// runtime fall back to its defaults. See:
	// https://github.com/netdata/netdata/issues/8949#issuecomment-638294959
	if v := os.Getenv("TZ"); strings.HasPrefix(v, ":") {
		_ = os.Unsetenv("TZ")
	}
}
+
+func main() {
+ opts := parseCLI()
+
+ if opts.Version {
+ fmt.Println(fmt.Sprintf("go.d.plugin, version: %s", version))
+ return
+ }
+
+ if opts.Debug {
+ logger.SetSeverity(logger.DEBUG)
+ }
+
+ a := agent.New(agent.Config{
+ Name: name,
+ ConfDir: confDir(opts),
+ ModulesConfDir: modulesConfDir(opts),
+ ModulesSDConfPath: watchPaths(opts),
+ StateFile: stateFile(),
+ LockDir: lockDir,
+ RunModule: opts.Module,
+ MinUpdateEvery: opts.UpdateEvery,
+ })
+
+ a.Debugf("plugin: name=%s, version=%s", a.Name, version)
+ if u, err := user.Current(); err == nil {
+ a.Debugf("current user: name=%s, uid=%s", u.Username, u.Uid)
+ }
+
+ a.Run()
+}
+
+func parseCLI() *cli.Option {
+ opt, err := cli.Parse(os.Args)
+ if err != nil {
+ if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
+ os.Exit(0)
+ } else {
+ os.Exit(1)
+ }
+ }
+ return opt
+}
diff --git a/config/go.d.conf b/config/go.d.conf
new file mode 100644
index 0000000..a045bd8
--- /dev/null
+++ b/config/go.d.conf
@@ -0,0 +1,74 @@
+# netdata go.d.plugin configuration
+#
+# This file is in YAML format.
+
+# Enable/disable the whole go.d.plugin.
+enabled: yes
+
+# Enable/disable default value for all modules.
+default_run: yes
+
+# Maximum number of used CPUs. Zero means no limit.
+max_procs: 0
+
+# Enable/disable a specific go.d.plugin module.
+# If you want to change any value, you need to uncomment it first.
+# IMPORTANT: Do not remove the leading spaces, just remove the # symbol. There should be a space before the module name.
+modules:
+# activemq: yes
+# apache: yes
+# bind: yes
+# cockroachdb: yes
+# consul: yes
+# coredns: yes
+# couchbase: yes
+# couchdb: yes
+# dnsdist: yes
+# dnsmasq: yes
+# dnsmasq_dhcp: yes
+# dns_query: yes
+# docker_engine: yes
+# dockerhub: yes
+# elasticsearch: yes
+# example: no
+# filecheck: yes
+# fluentd: yes
+# freeradius: yes
+# hdfs: yes
+# httpcheck: yes
+# isc_dhcpd: yes
+# k8s_kubelet: yes
+# k8s_kubeproxy: yes
+# lighttpd: yes
+# lighttpd2: yes
+# logstash: yes
+# mysql: yes
+# nginx: yes
+# nginxvts: yes
+# openvpn: yes
+# phpdaemon: yes
+# phpfpm: yes
+# pihole: yes
+# pika: yes
+# portcheck: yes
+# powerdns: yes
+# powerdns_recursor: yes
+# prometheus: yes
+# pulsar: yes
+# rabbitmq: yes
+# redis: yes
+# scaleio: yes
+# solr: yes
+# springboot2: yes
+# squidlog: yes
+# systemdunits: yes
+# tengine: yes
+# unbound: yes
+# vernemq: yes
+# vcsa: yes
+# vsphere: yes
+# web_log: yes
+# whoisquery: yes
+# wmi: yes
+# x509check: yes
+# zookeeper: yes
diff --git a/config/go.d/activemq.conf b/config/go.d/activemq.conf
new file mode 100644
index 0000000..20d80ae
--- /dev/null
+++ b/config/go.d/activemq.conf
@@ -0,0 +1,190 @@
+# netdata go.d.plugin configuration for activemq
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - webadmin
+# Webadmin root path.
+# Syntax:
+# webadmin: webadmin
+#
+# - max_queues
+# Queues processing/charting limit.
+# Syntax:
+# max_queues: 999
+#
+# - max_topics
+# Topics processing/charting limit.
+# Syntax:
+# max_topics: 999
+#
+# - queues_filter
+# Queues processing/charting filter.
+# Syntax:
+# queues_filter: pattern # Pattern syntax: simple patterns.
+#
+# - topics_filter
+# Topics processing/charting filter.
+# Syntax:
+# topics_filter: pattern # Pattern syntax: simple patterns.
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+#   Username for proxy basic HTTP authentication.
+#   Syntax:
+#   proxy_username: bruce
+#
+# - proxy_password
+#   Password for proxy basic HTTP authentication.
+#   Syntax:
+#   proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#   HTTP request body.
+#   Syntax:
+#   body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that client use when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# Simple patterns syntax: https://docs.netdata.cloud/libnetdata/simple_pattern/
+#
+#
+# [ JOB defaults ]:
+# url: http://localhost:8161
+# timeout: 1
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+# max_queues: 50
+# max_topics: 50
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+# - webadmin
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost:8161
+ webadmin: admin
diff --git a/config/go.d/apache.conf b/config/go.d/apache.conf
new file mode 100644
index 0000000..ac0d503
--- /dev/null
+++ b/config/go.d/apache.conf
@@ -0,0 +1,161 @@
+# netdata go.d.plugin configuration for apache
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+#   Username for proxy basic HTTP authentication.
+#   Syntax:
+#   proxy_username: bruce
+#
+# - proxy_password
+#   Password for proxy basic HTTP authentication.
+#   Syntax:
+#   proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#   HTTP request body.
+#   Syntax:
+#   body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that client use when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://localhost/server-status?auto
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost/server-status?auto
+
+ - name: local
+ url: http://127.0.0.1/server-status?auto
diff --git a/config/go.d/bind.conf b/config/go.d/bind.conf
new file mode 100644
index 0000000..008f79b
--- /dev/null
+++ b/config/go.d/bind.conf
@@ -0,0 +1,170 @@
+# netdata go.d.plugin configuration for bind
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - permit_view
+# Bind view filter. Only permitted by filter views will be charted. Default: deny all.
+# Syntax:
+# permit_view: pattern # Pattern syntax: simple patterns.
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+#   Username for proxy basic HTTP authentication.
+#   Syntax:
+#   proxy_username: bruce
+#
+# - proxy_password
+#   Password for proxy basic HTTP authentication.
+#   Syntax:
+#   proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#   HTTP request body.
+#   Syntax:
+#   body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that client use when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# Simple patterns syntax: https://docs.netdata.cloud/libnetdata/simple_pattern/
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:8653/json/v1
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+# permit_view: "" (empty permit_view == deny all)
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://127.0.0.1:8653/json/v1
+
+ - name: local
+ url: http://127.0.0.1:8653/xml/v3
diff --git a/config/go.d/cockroachdb.conf b/config/go.d/cockroachdb.conf
new file mode 100644
index 0000000..5011a7e
--- /dev/null
+++ b/config/go.d/cockroachdb.conf
@@ -0,0 +1,161 @@
+# netdata go.d.plugin configuration for cockroachdb
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+# proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+# proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:8080/_status/vars
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost:8080/_status/vars
+
+ - name: local
+ url: http://127.0.0.1:8080/_status/vars
diff --git a/config/go.d/consul.conf b/config/go.d/consul.conf
new file mode 100644
index 0000000..cffce5a
--- /dev/null
+++ b/config/go.d/consul.conf
@@ -0,0 +1,179 @@
+# netdata go.d.plugin configuration for consul
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - acl_token
+# ACL token used in every request.
+# Syntax:
+# acl_token: token
+#
+# - max_checks
+# Checks processing/charting limit.
+# Syntax:
+# max_checks: 999
+#
+# - checks_filter
+# Checks processing/charting filter.
+# Syntax:
+# checks_filter: pattern # Pattern syntax: simple patterns.
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+# proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+# proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# Simple patterns syntax: https://docs.netdata.cloud/libnetdata/simple_pattern/
+#
+#
+# [ JOB defaults ]:
+# url: http://localhost:8500
+# timeout: 1
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost:8500
+
+ - name: local
+ url: http://127.0.0.1:8500
diff --git a/config/go.d/coredns.conf b/config/go.d/coredns.conf
new file mode 100644
index 0000000..3fb40ef
--- /dev/null
+++ b/config/go.d/coredns.conf
@@ -0,0 +1,184 @@
+# netdata go.d.plugin configuration for coredns
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://127.0.0.1:9153/metrics
+#
+# - per_server_stats
+# Server filter. Module will collect server statistics if filter matches the server.
+# Filter logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+# Pattern syntax: https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format
+# Syntax:
+# per_server_stats:
+# includes:
+# - pattern1
+# - pattern2
+# excludes:
+# - pattern3
+# - pattern4
+#
+# - per_zone_stats
+# Zone filter. Module will collect zone statistics if filter matches the zone.
+# Filter logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+# Pattern syntax: https://github.com/netdata/go.d.plugin/tree/master/pkg/matcher#supported-format
+# Syntax:
+# per_zone_stats:
+# includes:
+# - pattern1
+# - pattern2
+# excludes:
+# - pattern3
+# - pattern4
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+# proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+# proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:9153/metrics
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - url: http://127.0.0.1:9153/metrics
+ - url: http://kube-dns.kube-system.svc.cluster.local:9153/metrics
diff --git a/config/go.d/couchbase.conf b/config/go.d/couchbase.conf
new file mode 100644
index 0000000..b1747e5
--- /dev/null
+++ b/config/go.d/couchbase.conf
@@ -0,0 +1,96 @@
+# netdata go.d.plugin configuration for couchbase
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 10.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:8091
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: admin
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: 123456
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:8091
+# timeout: 10
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 10
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://127.0.0.1:8091
+ username: admin
+ password: password
diff --git a/config/go.d/couchdb.conf b/config/go.d/couchdb.conf
new file mode 100644
index 0000000..5c4a0b4
--- /dev/null
+++ b/config/go.d/couchdb.conf
@@ -0,0 +1,173 @@
+# netdata go.d.plugin configuration for couchdb
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 10.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:5984
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+# proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+# proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 5
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+# - node
+# CouchDB node name. Same as -name vm.args argument.
+# Syntax:
+# node: node@host
+#
+# - databases
+# List of database names for which db-specific stats should be displayed.
+# Syntax:
+# databases: plantdb animaldb
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:5984
+# node: _local
+# timeout: 5
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 10
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+# jobs:
+# - name: local
+# url: http://127.0.0.1:5984
+# node: node@host
+# username: admin
+# password: password
+# databases: my-db
diff --git a/config/go.d/dns_query.conf b/config/go.d/dns_query.conf
new file mode 100644
index 0000000..931b96e
--- /dev/null
+++ b/config/go.d/dns_query.conf
@@ -0,0 +1,118 @@
+# netdata go.d.plugin configuration for dns_query
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - domains
+# Domain or subdomains to query. The module chooses a random domain from the list on every iteration.
+# Syntax:
+# domains: [python.org, golang.org, ruby-lang.org]
+#
+# - servers
+# Servers to query.
+# Syntax:
+# servers: [8.8.8.8, 8.8.4.4]
+#
+# - port
+# DNS server port.
+# Syntax:
+# port: 53
+#
+# - network
+# Network protocol name. Available options: udp, tcp, tcp-tls. Default: udp.
+# Syntax:
+# network: udp
+#
+# - record_type
+# Query record type. Available options: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. Default: A.
+# Syntax:
+# record_type: A
+#
+# - timeout
+# Query read timeout.
+# Syntax:
+# timeout: 2
+#
+#
+# [ JOB defaults ]:
+# port: 53
+# network: udp
+# record_type: A
+# timeout: 2
+# update_every: 5
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - domains
+# - servers
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+#jobs:
+# - name: example
+# domains:
+# - google.com
+# - github.com
+# - reddit.com
+# servers:
+# - 8.8.8.8
+# - 8.8.4.4
diff --git a/config/go.d/dnsdist.conf b/config/go.d/dnsdist.conf
new file mode 100644
index 0000000..70bfc1d
--- /dev/null
+++ b/config/go.d/dnsdist.conf
@@ -0,0 +1,165 @@
+# netdata go.d.plugin configuration for dnsdist
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+#
+# - url
+# Server URL.
+# Syntax:
+# url: http://127.0.0.1:5053
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+# proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+# proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP body to append.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:5053
+# timeout: 1
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://127.0.0.1:8083
+ headers:
+ X-API-Key: 'dnsdist-api-key' # static pre-shared authentication key for access to the REST API (api-key).
+#
+# - name: remote
+# url: http://203.0.113.0:8083
+# headers:
+# X-API-Key: 'dnsdist-api-key' # static pre-shared authentication key for access to the REST API (api-key).
diff --git a/config/go.d/dnsmasq.conf b/config/go.d/dnsmasq.conf
new file mode 100644
index 0000000..9fcfe59
--- /dev/null
+++ b/config/go.d/dnsmasq.conf
@@ -0,0 +1,100 @@
+# netdata go.d.plugin configuration for dnsmasq
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - address
+# Server's address. Format is 'ip_address:port'.
+# Syntax:
+# address: 127.0.0.1:53
+#
+# - protocol
+# DNS query transport protocol. Valid options: udp, tcp, tcp-tls.
+# Syntax:
+# protocol: udp
+#
+# - timeout
+# DNS query timeout (dial, write and read) in seconds.
+# Syntax:
+# timeout: 1
+#
+#
+# [ JOB defaults ]:
+# address: 127.0.0.1:53
+# protocol: udp
+# timeout: 1
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - address
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ protocol: udp
+ address: '127.0.0.1:53'
+
+# - name: remote
+# protocol: udp
+# address: '203.0.113.0:53'
diff --git a/config/go.d/dnsmasq_dhcp.conf b/config/go.d/dnsmasq_dhcp.conf
new file mode 100644
index 0000000..3e40ad5
--- /dev/null
+++ b/config/go.d/dnsmasq_dhcp.conf
@@ -0,0 +1,103 @@
+# netdata go.d.plugin configuration for dnsmasq_dhcp
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - leases_path
+# Path to dnsmasq dhcp leases file.
+# Syntax:
+# leases_path: /var/lib/misc/dnsmasq.leases
+#
+# - conf_path
+# Path to dnsmasq configuration file.
+# Syntax:
+# conf_path: /etc/dnsmasq.conf
+#
+# - conf_dir
+# Path to dnsmasq configuration directory.
+# Syntax:
+# conf_dir: /etc/dnsmasq.d
+#
+#
+#
+# [ JOB defaults ]:
+# leases_path : /var/lib/misc/dnsmasq.leases
+# conf_path : /etc/dnsmasq.conf
+# conf_dir : /etc/dnsmasq.d
+#
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name : dnsmasq_dhcp
+ leases_path : /var/lib/misc/dnsmasq.leases
+ conf_path : /etc/dnsmasq.conf
+ conf_dir : /etc/dnsmasq.d
+
+ - name : dnsmasq_dhcp
+ leases_path : /etc/pihole/dhcp.leases
+ conf_path : /etc/dnsmasq.conf
+ conf_dir : /etc/dnsmasq.d
diff --git a/config/go.d/docker_engine.conf b/config/go.d/docker_engine.conf
new file mode 100644
index 0000000..0ebebb0
--- /dev/null
+++ b/config/go.d/docker_engine.conf
@@ -0,0 +1,158 @@
+# netdata go.d.plugin configuration for docker_engine
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://127.0.0.1:9323/metrics
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+# proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+# proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:9323/metrics
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
diff --git a/config/go.d/dockerhub.conf b/config/go.d/dockerhub.conf
new file mode 100644
index 0000000..85806be
--- /dev/null
+++ b/config/go.d/dockerhub.conf
@@ -0,0 +1,164 @@
+# netdata go.d.plugin configuration for dockerhub
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - repositories
+# List of repositories to monitor.
+# Syntax:
+# repositories: ['user1/name1', 'user2/name2', 'user3/name3']
+#
+# - url
+# Server URL.
+# Syntax:
+# url: https://hub.docker.com/v2/repositories
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: https://hub.docker.com/v2/repositories
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+# update_every: 5
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - repositories
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+#jobs:
+# - name: local
+# repositories: ['user1/name1', 'user2/name2', 'user3/name3']
diff --git a/config/go.d/elasticsearch.conf b/config/go.d/elasticsearch.conf
new file mode 100644
index 0000000..8b10e89
--- /dev/null
+++ b/config/go.d/elasticsearch.conf
@@ -0,0 +1,186 @@
+# netdata go.d.plugin configuration for elasticsearch
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - collect_node_stats
+# Collect local node metrics from '/_nodes/_local/stats' endpoint. Default is 'yes'.
+# Syntax:
+#  collect_node_stats: yes/no
+#
+# - collect_indices_stats
+# Collect local node indices metrics from '/_cat/indices?local=true' endpoint. Default is 'no'.
+# Syntax:
+# collect_indices_stats: yes/no
+#
+# - collect_cluster_health
+# Collect cluster health metrics from '/_cluster/health' endpoint. Default is 'yes'.
+# Syntax:
+#  collect_cluster_health: yes/no
+#
+# - collect_cluster_stats
+# Collect cluster stats metrics from '/_cluster/stats' endpoint. Default is 'yes'.
+# Syntax:
+#  collect_cluster_stats: yes/no
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:9200
+# timeout: 5
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+# collect_node_stats: yes
+# collect_indices_stats: no
+# collect_cluster_health: yes
+# collect_cluster_stats: yes
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 5
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+# collect_node_stats: yes
+# collect_cluster_health: yes
+# collect_cluster_stats: yes
+# collect_indices_stats: yes
diff --git a/config/go.d/energid.conf b/config/go.d/energid.conf
new file mode 100644
index 0000000..6271b59
--- /dev/null
+++ b/config/go.d/energid.conf
@@ -0,0 +1,163 @@
+# netdata go.d.plugin configuration for energid
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+#
+# - url
+# Server URL.
+# Syntax:
+# url: http://127.0.0.1:5053
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP body to append.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:9796
+# timeout: 1
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+# [ JOBS ]
+#jobs:
+# - name: energi
+# url: http://127.0.0.1:9796
+# username: energy
+# password: energy
+#
+# - name: bitcoin
+# url: http://203.0.113.0:8332
+# username: bitcoin
+# password: bitcoin
diff --git a/config/go.d/example.conf b/config/go.d/example.conf
new file mode 100644
index 0000000..a2637b3
--- /dev/null
+++ b/config/go.d/example.conf
@@ -0,0 +1,94 @@
+# netdata go.d.plugin configuration for example
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - charts
+# Charts parameters.
+# Syntax:
+# charts:
+# type: <CHART_TYPE> (line, area, stacked)
+# num: <NUM_OF_CHARTS>
+# dimensions: <NUM_OF_DIMENSIONS>
+#
+# - hidden_charts
+# Hidden charts parameters.
+# Syntax:
+# charts:
+# type: <CHART_TYPE> (line, area, stacked)
+# num: <NUM_OF_CHARTS>
+# dimensions: <NUM_OF_DIMENSIONS>
+#
+#
+# [ JOB defaults ]:
+# charts:
+# num: 1
+# dimensions: 3
+#
+#
+# [ JOB mandatory parameters ]:
+# No parameters
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: example
diff --git a/config/go.d/filecheck.conf b/config/go.d/filecheck.conf
new file mode 100644
index 0000000..7a379d1
--- /dev/null
+++ b/config/go.d/filecheck.conf
@@ -0,0 +1,109 @@
+# netdata go.d.plugin configuration for filecheck
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - files
+# Files check parameters.
+# Syntax:
+# files:
+# include:
+# - '/path/to/file1'
+# - '/path/to/file2'
+#
+# - dirs
+# Directories check parameters.
+# Syntax:
+# dirs:
+# collect_dir_size: yes/no
+# include:
+# - '/path/to/dir1'
+# - '/path/to/dir2'
+#
+# - discovery_every
+# Files and directories discovery interval.
+# Syntax:
+# discovery_every: 60s
+#
+# [ JOB defaults ]:
+# update_every: 10
+# discovery_every: 30s
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 10
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+#jobs:
+# - name: files_example
+# files:
+# include:
+# - '/path/to/file1'
+# - '/path/to/file2'
+#
+# - name: dirs_example
+# dirs:
+# collect_dir_size: yes
+# include:
+# - '/path/to/dir1'
+# - '/path/to/dir2'
diff --git a/config/go.d/fluentd.conf b/config/go.d/fluentd.conf
new file mode 100644
index 0000000..e870a46
--- /dev/null
+++ b/config/go.d/fluentd.conf
@@ -0,0 +1,167 @@
+# netdata go.d.plugin configuration for fluentd
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - permit_plugin_id
+# Plugin filter. Only plugins permitted by the filter will be charted. Default: allow all.
+# Syntax:
+# permit_plugin_id: pattern # Pattern syntax: simple patterns (https://docs.netdata.cloud/libnetdata/simple_pattern/).
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:24220
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+# permit_plugin_id: "" (empty permit_plugin_id == allow all)
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost:24220
+
+ - name: local
+ url: http://127.0.0.1:24220
diff --git a/config/go.d/freeradius.conf b/config/go.d/freeradius.conf
new file mode 100644
index 0000000..80463d2
--- /dev/null
+++ b/config/go.d/freeradius.conf
@@ -0,0 +1,101 @@
+# netdata go.d.plugin configuration for freeradius
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - address
+# Status server address.
+# Syntax:
+# address: 127.0.0.1
+#
+# - port
+# Status server port.
+# Syntax:
+# port: 18121
+#
+# - secret
+# Status server secret.
+# Syntax:
+# secret: adminsecret
+#
+#
+# [ JOB defaults ]:
+# address: 127.0.0.1
+# port: 18121
+# secret: adminsecret
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ address: localhost
+ port: 18121
+ secret: adminsecret
+
+ - name: local
+ address: 127.0.0.1
+ port: 18121
+ secret: adminsecret
diff --git a/config/go.d/hdfs.conf b/config/go.d/hdfs.conf
new file mode 100644
index 0000000..0010e50
--- /dev/null
+++ b/config/go.d/hdfs.conf
@@ -0,0 +1,160 @@
+# netdata go.d.plugin configuration for hadoop hdfs
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# timeout: 1
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+#jobs:
+# - name: namenode
+# url: http://127.0.0.1:9870/jmx
+#
+# - name: datanode
+# url: http://127.0.0.1:9864/jmx
diff --git a/config/go.d/httpcheck.conf b/config/go.d/httpcheck.conf
new file mode 100644
index 0000000..2f8accf
--- /dev/null
+++ b/config/go.d/httpcheck.conf
@@ -0,0 +1,176 @@
+# netdata go.d.plugin configuration for httpcheck
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - status_accepted
+# HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart.
+# Syntax:
+# status_accepted: [200, 300, 400]
+#
+# - response_match
+# If the status code is accepted, the content of the response will be searched for this regex.
+# Syntax:
+# response_match: pattern # Pattern syntax: regular expression.
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#  proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+# HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+#
+# - tls_ca
+# Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# status_accepted : [200]
+# timeout : 1
+# method : GET
+# not_follow_redirects : no
+# tls_skip_verify : no
+# update_every : 5
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+#update_every : 1
+#autodetection_retry : 0
+#priority : 70000
+#
+#
+# [ JOBS ]
+#jobs:
+# - name: jira
+# url: https://jira.localdomain/
+#
+# - name: cool_website
+# url: http://cool.website:8080/home
+# status_accepted: [200, 204]
+# response_match: <title>My cool website!<\/title>
+# timeout: 2
diff --git a/config/go.d/isc_dhcpd.conf b/config/go.d/isc_dhcpd.conf
new file mode 100644
index 0000000..0675ac4
--- /dev/null
+++ b/config/go.d/isc_dhcpd.conf
@@ -0,0 +1,111 @@
+# netdata go.d.plugin configuration for ISC dhcpd
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - leases_path
+# Absolute path to the DHCP client lease database (dhcpd.leases).
+# Syntax:
+# leases_path: '/path/to/dhcpd.leases'
+#
+# - pools
+# List of IP pools to monitor.
+# <IP_RANGE> syntax: https://github.com/netdata/go.d.plugin/tree/master/pkg/iprange#supported-formats
+# Syntax:
+# pools:
+# - name: <POOL_NAME>
+# networks: '<A_SPACE_SEPARATED_LIST_OF_IP_RANGES>'
+# - name: <POOL_NAME>
+# networks: '<A_SPACE_SEPARATED_LIST_OF_IP_RANGES>'
+#
+#
+# [ JOB defaults ]:
+# leases_path: '/var/lib/dhcp/dhcpd.leases'
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - leases_path
+# - pools
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+#jobs:
+# - name: ipv4_example
+# leases_path: '/path/to/dhcpd.leases_ipv4'
+# pools:
+# - name: office
+# networks: '192.0.2.1-192.0.2.254'
+# - name: wifi
+# networks: '198.51.100.0/24'
+# - name: dmz
+# networks: '203.0.113.0/255.255.255.0'
+#
+# - name: ipv6_example
+# leases_path: '/path/to/dhcpd.leases_ipv6'
+# pools:
+# - name: office
+# networks: '2001:db8::/64'
+# - name: wifi
+# networks: '2001:db8:0:1::/64'
+# - name: dmz
+# networks: '2001:db8:0:2::/64'
diff --git a/config/go.d/k8s_kubelet.conf b/config/go.d/k8s_kubelet.conf
new file mode 100644
index 0000000..f823c8b
--- /dev/null
+++ b/config/go.d/k8s_kubelet.conf
@@ -0,0 +1,159 @@
+# netdata go.d.plugin configuration for k8s_kubelet
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://127.0.0.1:10255/metrics
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#      HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+#      Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:10255/metrics
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - url: http://127.0.0.1:10255/metrics
+ - url: https://localhost:10250/metrics
+ tls_skip_verify: yes
diff --git a/config/go.d/k8s_kubeproxy.conf b/config/go.d/k8s_kubeproxy.conf
new file mode 100644
index 0000000..861999a
--- /dev/null
+++ b/config/go.d/k8s_kubeproxy.conf
@@ -0,0 +1,156 @@
+# netdata go.d.plugin configuration for k8s_kubeproxy
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://127.0.0.1:10249/metrics
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#      HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+#      Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://127.0.0.1:10249/metrics
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+update_every: 1
+autodetection_retry: 0
+#
+#
+# [ JOBS ]
+jobs:
+ - url: http://127.0.0.1:10249/metrics
diff --git a/config/go.d/lighttpd.conf b/config/go.d/lighttpd.conf
new file mode 100644
index 0000000..127282f
--- /dev/null
+++ b/config/go.d/lighttpd.conf
@@ -0,0 +1,161 @@
+# netdata go.d.plugin configuration for lighttpd
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#      HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+#      Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://localhost/server-status?auto
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost/server-status?auto
+
+ - name: local
+ url: http://127.0.0.1/server-status?auto
diff --git a/config/go.d/lighttpd2.conf b/config/go.d/lighttpd2.conf
new file mode 100644
index 0000000..cc6dd79
--- /dev/null
+++ b/config/go.d/lighttpd2.conf
@@ -0,0 +1,161 @@
+# netdata go.d.plugin configuration for lighttpd2
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#      HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+#      Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://localhost/server-status?auto
+# timeout: 2
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost/server-status?format=plain
+
+ - name: local
+ url: http://127.0.0.1/server-status?format=plain
diff --git a/config/go.d/logstash.conf b/config/go.d/logstash.conf
new file mode 100644
index 0000000..5c2ec70
--- /dev/null
+++ b/config/go.d/logstash.conf
@@ -0,0 +1,161 @@
+# netdata go.d.plugin configuration for logstash
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#      HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+#      Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://localhost:9600
+# timeout: 1
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost:9600
+
+ - name: local
+ url: http://127.0.0.1:9600
diff --git a/config/go.d/mysql.conf b/config/go.d/mysql.conf
new file mode 100644
index 0000000..01e1b10
--- /dev/null
+++ b/config/go.d/mysql.conf
@@ -0,0 +1,136 @@
+# netdata go.d.plugin configuration for mysql
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - dsn
+# Data Source Name.
+# Format: [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+#      Except for the database name, all values are optional. So the minimal DSN is: /dbname
+# If you do not want to preselect a database, leave dbname empty: /
+# Full description: https://github.com/go-sql-driver/mysql#dsn-data-source-name
+# Syntax:
+# dsn: netdata@tcp(127.0.0.1:3306)/
+#
+# - my.cnf
+# Specifies my.cnf file to read connection parameters from under the [client] section
+# Syntax:
+# my.cnf: '/etc/my.cnf'
+#
+#
+# [ JOB defaults ]:
+# No parameters
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - dsn
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ # my.cnf
+ - name: local
+ my.cnf: '/etc/my.cnf'
+
+ - name: local
+ my.cnf: '/etc/mysql/my.cnf'
+
+ - name: local
+ my.cnf: '/etc/mysql/debian.cnf'
+
+ # root
+ - name: local
+ dsn: root@unix(/var/run/mysqld/mysqld.sock)/
+
+ - name: local
+ dsn: root@unix(/var/run/mysqld/mysql.sock)/
+
+ - name: local
+ dsn: root@unix(/var/lib/mysql/mysql.sock)/
+
+ - name: local
+ dsn: root@unix(/tmp/mysql.sock)/
+
+ - name: local
+ dsn: root@tcp(127.0.0.1:3306)/
+
+ - name: local
+ dsn: root@tcp([::1]:3306)/
+
+ # netdata
+ - name: local
+ dsn: netdata@unix(/var/run/mysqld/mysqld.sock)/
+
+ - name: local
+ dsn: netdata@unix(/var/run/mysqld/mysql.sock)/
+
+ - name: local
+ dsn: netdata@unix(/var/lib/mysql/mysql.sock)/
+
+ - name: local
+ dsn: netdata@unix(/tmp/mysql.sock)/
+
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+ - name: local
+ dsn: netdata@tcp([::1]:3306)/
diff --git a/config/go.d/nginx.conf b/config/go.d/nginx.conf
new file mode 100644
index 0000000..2f227fa
--- /dev/null
+++ b/config/go.d/nginx.conf
@@ -0,0 +1,161 @@
+# netdata go.d.plugin configuration for nginx
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#      HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+#      Certificate authority that the client uses when verifying server certificates.
+# Syntax:
+# tls_ca: path/to/ca.pem
+#
+# - tls_cert
+# Client tls certificate.
+# Syntax:
+# tls_cert: path/to/cert.pem
+#
+# - tls_key
+# Client tls key.
+# Syntax:
+# tls_key: path/to/key.pem
+#
+#
+# [ JOB defaults ]:
+# url: http://localhost/stub_status
+# timeout: 1
+# method: GET
+# not_follow_redirects: no
+# tls_skip_verify: no
+#
+#
+# [ JOB mandatory parameters ]:
+# - name
+# - url
+#
+# ------------------------------------------------MODULE-CONFIGURATION--------------------------------------------------
+# [ GLOBAL ]
+# update_every: 1
+# autodetection_retry: 0
+# priority: 70000
+#
+#
+# [ JOBS ]
+jobs:
+ - name: local
+ url: http://localhost/stub_status
+
+ - name: local
+ url: http://127.0.0.1/stub_status
diff --git a/config/go.d/nginxvts.conf b/config/go.d/nginxvts.conf
new file mode 100644
index 0000000..bc452df
--- /dev/null
+++ b/config/go.d/nginxvts.conf
@@ -0,0 +1,160 @@
+# netdata go.d.plugin configuration for nginxvts
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - GLOBAL
+# - JOBS
+#
+#
+# [ GLOBAL ]
+# These variables set the defaults for all JOBs, however each JOB may define its own, overriding the defaults.
+#
+# The GLOBAL section format:
+# param1: value1
+# param2: value2
+#
+# Currently supported global parameters:
+# - update_every
+# Data collection frequency in seconds. Default: 1.
+#
+# - autodetection_retry
+# Re-check interval in seconds. Attempts to start the job are made once every interval.
+# Zero means not to schedule re-check. Default: 0.
+#
+# - priority
+# Priority is the relative priority of the charts as rendered on the web page,
+# lower numbers make the charts appear before the ones with higher numbers. Default: 70000.
+#
+#
+# [ JOBS ]
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# IMPORTANT:
+# - Parameter 'name' is mandatory.
+# - Jobs with the same name are mutually exclusive. Only one of them will be allowed running at any time.
+#
+# This allows autodetection to try several alternatives and pick the one that works.
+# Any number of jobs is supported.
+#
+# The JOBS section format:
+#
+# jobs:
+# - name: job1
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+# param2: value2
+#
+# - name: job2
+# param1: value1
+#
+#
+# [ List of JOB specific parameters ]:
+# - url
+# Server URL.
+# Syntax:
+# url: http://localhost:80
+#
+# - username
+# Username for basic HTTP authentication.
+# Syntax:
+# username: tony
+#
+# - password
+# Password for basic HTTP authentication.
+# Syntax:
+# password: stark
+#
+# - proxy_url
+# Proxy URL.
+# Syntax:
+# proxy_url: http://localhost:3128
+#
+# - proxy_username
+# Username for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_username: bruce
+#
+# - proxy_password
+# Password for proxy basic HTTP authentication.
+# Syntax:
+#        proxy_password: wayne
+#
+# - timeout
+# HTTP response timeout.
+# Syntax:
+# timeout: 1
+#
+# - method
+# HTTP request method.
+# Syntax:
+# method: GET
+#
+# - body
+#      HTTP request body.
+# Syntax:
+# body: '{fake: data}'
+#
+# - headers
+# HTTP request headers.
+# Syntax:
+# headers:
+# X-API-Key: key
+#
+# - not_follow_redirects
+# Whether to not follow redirects from the server.
+# Syntax:
+# not_follow_redirects: yes/no
+#
+# - tls_skip_verify
+# Whether to skip verifying server's certificate chain and hostname.
+# Syntax:
+# tls_skip_verify: yes/no
+#
+# - tls_ca
+#      Certificate authority that the client uses when verifying server certificates.
+# Syntax: