summaryrefslogtreecommitdiffstats
path: root/exporting
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 11:19:16 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 12:07:37 +0000
commitb485aab7e71c1625cfc27e0f92c9509f42378458 (patch)
treeae9abe108601079d1679194de237c9a435ae5b55 /exporting
parentAdding upstream version 1.44.3. (diff)
downloadnetdata-upstream.tar.xz
netdata-upstream.zip
Adding upstream version 1.45.3+dfsg.upstream/1.45.3+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--exporting/Makefile.am29
-rw-r--r--exporting/README.md327
-rw-r--r--exporting/aws_kinesis/Makefile.am8
-rw-r--r--exporting/aws_kinesis/metadata.yaml173
-rw-r--r--exporting/graphite/Makefile.am4
-rw-r--r--exporting/graphite/integrations/influxdb.md172
-rw-r--r--exporting/graphite/metadata.yaml212
-rw-r--r--exporting/json/Makefile.am4
-rw-r--r--exporting/json/json.c349
-rw-r--r--exporting/json/metadata.yaml151
-rw-r--r--exporting/mongodb/Makefile.am8
-rw-r--r--exporting/mongodb/integrations/mongodb.md145
-rw-r--r--exporting/mongodb/metadata.yaml151
-rw-r--r--exporting/opentsdb/Makefile.am8
-rw-r--r--exporting/opentsdb/metadata.yaml176
-rw-r--r--exporting/prometheus/Makefile.am12
-rw-r--r--exporting/prometheus/README.md361
-rw-r--r--exporting/prometheus/integrations/elasticsearch.md158
-rw-r--r--exporting/prometheus/integrations/kafka.md158
-rw-r--r--exporting/prometheus/integrations/new_relic.md158
-rw-r--r--exporting/prometheus/integrations/postgresql.md158
-rw-r--r--exporting/prometheus/metadata.yaml436
-rw-r--r--exporting/prometheus/remote_write/Makefile.am14
-rw-r--r--exporting/pubsub/Makefile.am8
-rw-r--r--exporting/pubsub/metadata.yaml152
-rw-r--r--exporting/tests/Makefile.am4
-rw-r--r--exporting/tests/exporting_doubles.c405
-rw-r--r--exporting/tests/exporting_fixtures.c184
-rw-r--r--exporting/tests/netdata_doubles.c255
-rw-r--r--exporting/tests/system_doubles.c61
-rw-r--r--exporting/tests/test_exporting_engine.c2068
-rw-r--r--exporting/tests/test_exporting_engine.h209
-rw-r--r--src/exporting/TIMESCALE.md (renamed from exporting/TIMESCALE.md)2
-rw-r--r--src/exporting/WALKTHROUGH.md (renamed from exporting/WALKTHROUGH.md)4
l---------src/exporting/aws_kinesis/README.md (renamed from exporting/aws_kinesis/README.md)0
-rw-r--r--src/exporting/aws_kinesis/aws_kinesis.c (renamed from exporting/aws_kinesis/aws_kinesis.c)0
-rw-r--r--src/exporting/aws_kinesis/aws_kinesis.h (renamed from exporting/aws_kinesis/aws_kinesis.h)0
-rw-r--r--src/exporting/aws_kinesis/aws_kinesis_put_record.cc (renamed from exporting/aws_kinesis/aws_kinesis_put_record.cc)0
-rw-r--r--src/exporting/aws_kinesis/aws_kinesis_put_record.h (renamed from exporting/aws_kinesis/aws_kinesis_put_record.h)0
-rw-r--r--src/exporting/aws_kinesis/integrations/aws_kinesis.md (renamed from exporting/aws_kinesis/integrations/aws_kinesis.md)10
-rw-r--r--src/exporting/check_filters.c (renamed from exporting/check_filters.c)0
-rw-r--r--src/exporting/clean_connectors.c (renamed from exporting/clean_connectors.c)0
-rw-r--r--src/exporting/exporting.conf (renamed from exporting/exporting.conf)0
-rw-r--r--src/exporting/exporting_engine.c (renamed from exporting/exporting_engine.c)10
-rw-r--r--src/exporting/exporting_engine.h (renamed from exporting/exporting_engine.h)2
l---------src/exporting/graphite/README.md (renamed from exporting/graphite/README.md)0
-rw-r--r--src/exporting/graphite/graphite.c (renamed from exporting/graphite/graphite.c)8
-rw-r--r--src/exporting/graphite/graphite.h (renamed from exporting/graphite/graphite.h)0
-rw-r--r--src/exporting/graphite/integrations/blueflood.md (renamed from exporting/graphite/integrations/blueflood.md)10
-rw-r--r--src/exporting/graphite/integrations/graphite.md (renamed from exporting/graphite/integrations/graphite.md)10
-rw-r--r--src/exporting/graphite/integrations/kairosdb.md (renamed from exporting/graphite/integrations/kairosdb.md)10
-rw-r--r--src/exporting/init_connectors.c (renamed from exporting/init_connectors.c)13
l---------src/exporting/json/README.md (renamed from exporting/json/README.md)0
-rw-r--r--src/exporting/json/integrations/json.md (renamed from exporting/json/integrations/json.md)10
-rw-r--r--src/exporting/json/json.h (renamed from exporting/json/json.h)0
l---------src/exporting/mongodb/README.md (renamed from exporting/mongodb/README.md)0
-rw-r--r--src/exporting/mongodb/mongodb.c (renamed from exporting/mongodb/mongodb.c)0
-rw-r--r--src/exporting/mongodb/mongodb.h (renamed from exporting/mongodb/mongodb.h)0
-rwxr-xr-xsrc/exporting/nc-exporting.sh (renamed from exporting/nc-exporting.sh)0
l---------src/exporting/opentsdb/README.md (renamed from exporting/opentsdb/README.md)0
-rw-r--r--src/exporting/opentsdb/integrations/opentsdb.md (renamed from exporting/opentsdb/integrations/opentsdb.md)10
-rw-r--r--src/exporting/opentsdb/opentsdb.c (renamed from exporting/opentsdb/opentsdb.c)16
-rw-r--r--src/exporting/opentsdb/opentsdb.h (renamed from exporting/opentsdb/opentsdb.h)0
-rw-r--r--src/exporting/process_data.c (renamed from exporting/process_data.c)6
-rw-r--r--src/exporting/prometheus/integrations/appoptics.md (renamed from exporting/prometheus/integrations/appoptics.md)10
-rw-r--r--src/exporting/prometheus/integrations/azure_data_explorer.md (renamed from exporting/prometheus/integrations/azure_data_explorer.md)10
-rw-r--r--src/exporting/prometheus/integrations/azure_event_hub.md (renamed from exporting/prometheus/integrations/azure_event_hub.md)10
-rw-r--r--src/exporting/prometheus/integrations/chronix.md (renamed from exporting/prometheus/integrations/chronix.md)10
-rw-r--r--src/exporting/prometheus/integrations/cortex.md (renamed from exporting/prometheus/integrations/cortex.md)10
-rw-r--r--src/exporting/prometheus/integrations/cratedb.md (renamed from exporting/prometheus/integrations/cratedb.md)10
-rw-r--r--src/exporting/prometheus/integrations/gnocchi.md (renamed from exporting/prometheus/integrations/gnocchi.md)10
-rw-r--r--src/exporting/prometheus/integrations/google_bigquery.md (renamed from exporting/prometheus/integrations/google_bigquery.md)10
-rw-r--r--src/exporting/prometheus/integrations/irondb.md (renamed from exporting/prometheus/integrations/irondb.md)10
-rw-r--r--src/exporting/prometheus/integrations/m3db.md (renamed from exporting/prometheus/integrations/m3db.md)10
-rw-r--r--src/exporting/prometheus/integrations/metricfire.md (renamed from exporting/prometheus/integrations/metricfire.md)10
-rw-r--r--src/exporting/prometheus/integrations/prometheus_remote_write.md (renamed from exporting/prometheus/integrations/prometheus_remote_write.md)10
-rw-r--r--src/exporting/prometheus/integrations/quasardb.md (renamed from exporting/prometheus/integrations/quasardb.md)10
-rw-r--r--src/exporting/prometheus/integrations/splunk_signalfx.md (renamed from exporting/prometheus/integrations/splunk_signalfx.md)10
-rw-r--r--src/exporting/prometheus/integrations/thanos.md (renamed from exporting/prometheus/integrations/thanos.md)10
-rw-r--r--src/exporting/prometheus/integrations/tikv.md (renamed from exporting/prometheus/integrations/tikv.md)10
-rw-r--r--src/exporting/prometheus/integrations/timescaledb.md (renamed from exporting/prometheus/integrations/timescaledb.md)10
-rw-r--r--src/exporting/prometheus/integrations/victoriametrics.md (renamed from exporting/prometheus/integrations/victoriametrics.md)10
-rw-r--r--src/exporting/prometheus/integrations/vmware_aria.md (renamed from exporting/prometheus/integrations/vmware_aria.md)10
-rw-r--r--src/exporting/prometheus/integrations/wavefront.md (renamed from exporting/prometheus/integrations/wavefront.md)10
-rw-r--r--src/exporting/prometheus/prometheus.c (renamed from exporting/prometheus/prometheus.c)170
-rw-r--r--src/exporting/prometheus/prometheus.h (renamed from exporting/prometheus/prometheus.h)2
l---------src/exporting/prometheus/remote_write/README.md (renamed from exporting/prometheus/remote_write/README.md)0
-rw-r--r--src/exporting/prometheus/remote_write/remote_write.c (renamed from exporting/prometheus/remote_write/remote_write.c)28
-rw-r--r--src/exporting/prometheus/remote_write/remote_write.h (renamed from exporting/prometheus/remote_write/remote_write.h)0
-rw-r--r--src/exporting/prometheus/remote_write/remote_write.proto (renamed from exporting/prometheus/remote_write/remote_write.proto)0
-rw-r--r--src/exporting/prometheus/remote_write/remote_write_request.cc (renamed from exporting/prometheus/remote_write/remote_write_request.cc)0
-rw-r--r--src/exporting/prometheus/remote_write/remote_write_request.h (renamed from exporting/prometheus/remote_write/remote_write_request.h)0
l---------src/exporting/pubsub/README.md (renamed from exporting/pubsub/README.md)0
-rw-r--r--src/exporting/pubsub/integrations/google_cloud_pub_sub.md (renamed from exporting/pubsub/integrations/google_cloud_pub_sub.md)10
-rw-r--r--src/exporting/pubsub/pubsub.c (renamed from exporting/pubsub/pubsub.c)0
-rw-r--r--src/exporting/pubsub/pubsub.h (renamed from exporting/pubsub/pubsub.h)0
-rw-r--r--src/exporting/pubsub/pubsub_publish.cc (renamed from exporting/pubsub/pubsub_publish.cc)0
-rw-r--r--src/exporting/pubsub/pubsub_publish.h (renamed from exporting/pubsub/pubsub_publish.h)0
-rw-r--r--src/exporting/read_config.c (renamed from exporting/read_config.c)0
-rw-r--r--src/exporting/sample-metadata.yaml (renamed from exporting/sample-metadata.yaml)0
-rw-r--r--src/exporting/send_data.c (renamed from exporting/send_data.c)4
-rw-r--r--src/exporting/send_internal_metrics.c (renamed from exporting/send_internal_metrics.c)0
102 files changed, 300 insertions, 6953 deletions
diff --git a/exporting/Makefile.am b/exporting/Makefile.am
deleted file mode 100644
index 41fcac0bd..000000000
--- a/exporting/Makefile.am
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- tests \
- graphite \
- json \
- opentsdb \
- prometheus \
- aws_kinesis \
- pubsub \
- mongodb \
- $(NULL)
-
-dist_libconfig_DATA = \
- exporting.conf \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- TIMESCALE.md \
- WALKTHROUGH.md \
- $(NULL)
-
-dist_noinst_SCRIPTS = \
- nc-exporting.sh \
- $(NULL)
diff --git a/exporting/README.md b/exporting/README.md
deleted file mode 100644
index 8a52968ee..000000000
--- a/exporting/README.md
+++ /dev/null
@@ -1,327 +0,0 @@
-<!--
-title: "Exporting reference"
-description: "With the exporting engine, you can archive your Netdata metrics to multiple external databases for long-term storage or further analysis."
-sidebar_label: "Export"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/README.md"
-learn_status: "Published"
-learn_rel_path: "Integrations/Export"
-learn_doc_purpose: "Explain the exporting engine options and all of our the exporting connectors options"
--->
-
-# Exporting reference
-
-Welcome to the exporting engine reference guide. This guide contains comprehensive information about enabling,
-configuring, and monitoring Netdata's exporting engine, which allows you to send metrics to external time-series
-databases.
-
-For a quick introduction to the exporting engine's features, read our doc on [exporting metrics to time-series
-databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md), or jump in to [enabling a connector](https://github.com/netdata/netdata/blob/master/docs/export/enable-connector.md).
-
-The exporting engine has a modular structure and supports metric exporting via multiple exporting connector instances at
-the same time. You can have different update intervals and filters configured for every exporting connector instance.
-
-When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
-restart its process_, not the entire [database of long-term metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md).
-
-Since Netdata collects thousands of metrics per server per second, which would easily congest any database server when
-several Netdata servers are sending data to it, Netdata allows sending metrics at a lower frequency, by resampling them.
-
-So, although Netdata collects metrics every second, it can send to the external database servers averages or sums every
-X seconds (though, it can send them per second if you need it to).
-
-## Features
-
-### Integration
-
-The exporting engine uses a number of connectors to send Netdata metrics to external time-series databases. See our
-[list of supported databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md#supported-databases) for information on which
-connector to enable and configure for your database of choice.
-
-- [**AWS Kinesis Data Streams**](https://github.com/netdata/netdata/blob/master/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
- format.
-- [**Google Cloud Pub/Sub Service**](https://github.com/netdata/netdata/blob/master/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
- format.
-- [**Graphite**](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
- `prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can
- also be configured). Learn more in our guide to [export and visualize Netdata metrics in
- Graphite](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md).
-- [**JSON** document databases](https://github.com/netdata/netdata/blob/master/exporting/json/README.md)
-- [**OpenTSDB**](https://github.com/netdata/netdata/blob/master/exporting/opentsdb/README.md): Use a plaintext or HTTP interfaces. Metrics are sent to
- OpenTSDB as `prefix.chart.dimension` with tag `host=hostname`.
-- [**MongoDB**](https://github.com/netdata/netdata/blob/master/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
-- [**Prometheus**](https://github.com/netdata/netdata/blob/master/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
- from node using the Netdata API.
-- [**Prometheus remote write**](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
- buffer encoding over HTTP. Supports many [storage
- providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
-- [**TimescaleDB**](https://github.com/netdata/netdata/blob/master/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
- Netdata client and writes them to a TimescaleDB table.
-
-### Chart filtering
-
-Netdata can filter metrics, to send only a subset of the collected metrics. You can use the
-configuration file
-
-```txt
-[prometheus:exporter]
- send charts matching = system.*
-```
-
-or the URL parameter `filter` in the `allmetrics` API call.
-
-```txt
-http://localhost:19999/api/v1/allmetrics?format=shell&filter=system.*
-```
-
-### Operation modes
-
-Netdata supports three modes of operation for all exporting connectors:
-
-- `as-collected` sends to external databases the metrics as they are collected, in the units they are collected.
- So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example,
- to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
-
-- `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
- are sent as gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but
- you will not be able to copy and paste queries from other sources to convert units. For example, CPU utilization
- percentage is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage
- to the external database.
-
-- `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
- database. So, if Netdata is configured to send data to the database every 10 seconds, the sum of the 10 values
- shown on the Netdata charts will be used.
-
-Time-series databases suggest to collect the raw values (`as-collected`). If you plan to invest on building your
-monitoring around a time-series database and you already know (or you will invest in learning) how to convert units
-and normalize the metrics in Grafana or other visualization tools, we suggest to use `as-collected`.
-
-If, on the other hand, you just need long term archiving of Netdata metrics and you plan to mainly work with
-Netdata, we suggest to use `average`. It decouples visualization from data collection, so it will generally be a lot
-simpler. Furthermore, if you use `average`, the charts shown in the external service will match exactly what you
-see in Netdata, which is not necessarily true for the other modes of operation.
-
-### Independent operation
-
-This code is smart enough, not to slow down Netdata, independently of the speed of the external database server.
-
-> ❗ You should keep in mind though that many exporting connector instances can consume a lot of CPU resources if they
-> run their batches at the same time. You can set different update intervals for every exporting connector instance,
-> but even in that case they can occasionally synchronize their batches for a moment.
-
-## Configuration
-
-Here are the configuration blocks for every supported connector. Your current `exporting.conf` file may look a little
-different.
-
-You can configure each connector individually using the available [options](#options). The
-`[graphite:my_graphite_instance]` block contains examples of some of these additional options in action.
-
-```conf
-[exporting:global]
- enabled = yes
- send configured labels = no
- send automatic labels = no
- update every = 10
-
-[prometheus:exporter]
- send names instead of ids = yes
- send configured labels = yes
- send automatic labels = no
- send charts matching = *
- send hosts matching = localhost *
- prefix = netdata
-
-[graphite:my_graphite_instance]
- enabled = yes
- destination = localhost:2003
- data source = average
- prefix = Netdata
- hostname = my-name
- update every = 10
- buffer on failures = 10
- timeout ms = 20000
- send charts matching = *
- send hosts matching = localhost *
- send names instead of ids = yes
- send configured labels = yes
- send automatic labels = yes
-
-[prometheus_remote_write:my_prometheus_remote_write_instance]
- enabled = yes
- destination = localhost
- remote write URL path = /receive
-
-[kinesis:my_kinesis_instance]
- enabled = yes
- destination = us-east-1
- stream name = netdata
- aws_access_key_id = my_access_key_id
- aws_secret_access_key = my_aws_secret_access_key
-
-[pubsub:my_pubsub_instance]
- enabled = yes
- destination = pubsub.googleapis.com
- credentials file = /etc/netdata/pubsub_credentials.json
- project id = my_project
- topic id = my_topic
-
-[mongodb:my_mongodb_instance]
- enabled = yes
- destination = localhost
- database = my_database
- collection = my_collection
-
-[json:my_json_instance]
- enabled = yes
- destination = localhost:5448
-
-[opentsdb:my_opentsdb_plaintext_instance]
- enabled = yes
- destination = localhost:4242
-
-[opentsdb:http:my_opentsdb_http_instance]
- enabled = yes
- destination = localhost:4242
- username = my_username
- password = my_password
-
-[opentsdb:https:my_opentsdb_https_instance]
- enabled = yes
- destination = localhost:8082
-```
-
-### Sections
-
-- `[exporting:global]` is a section where you can set your defaults for all exporting connectors
-- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
- `http://NODE:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`).
-- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
- - `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
- prometheus_remote_write | json | kinesis | pubsub | mongodb. For graphite, opentsdb,
- json, and prometheus_remote_write connectors you can also use `:http` or `:https` modifiers
- (e.g.: `opentsdb:https`).
- - `name` can be arbitrary instance name you chose.
-
-### Options
-
-Configure individual connectors and override any global settings with the following options.
-
-- `enabled = yes | no`, enables or disables an exporting connector instance
-
-- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
- ports to connect to. Netdata will use the **first available** to send the metrics.
-
- The format of each item in this list, is: `[PROTOCOL:]IP[:PORT]`.
-
- `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and only supported by the current exporting engine.
-
- `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you can to enclose the IP in `[]` to
- separate it from the port.
-
- `PORT` can be a number of a service name. If omitted, the default port for the exporting connector will be used
- (graphite = 2003, opentsdb = 4242).
-
- Example IPv4:
-
-```conf
- destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
-```
-
- Example IPv6 and IPv4 together:
-
-```conf
- destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
- Netdata also ships `nc-exporting.sh`, a script that can be used as a fallback exporting connector to save the
- metrics to disk and push them to the time-series database when it becomes available again. It can also be used to
- monitor / trace / debug the metrics Netdata generates.
-
- For the Kinesis exporting connector `destination` should be set to an AWS region (for example, `us-east-1`).
-
- For the MongoDB exporting connector `destination` should be set to a
- [MongoDB URI](https://docs.mongodb.com/manual/reference/connection-string/).
-
- For the Pub/Sub exporting connector `destination` can be set to a specific service endpoint.
-
-- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
- be sent to the external database.
-
-- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default this
- is `[global].hostname`.
-
-- `prefix = Netdata`, is the prefix to add to all metrics.
-
-- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
- randomness to this number, to prevent stressing the external server when many Netdata servers send data to the same
- database. This randomness does not affect the quality of the data, only the time they are sent.
-
-- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data,
- when the external database server is not available. If the server fails to receive the data after that many
- failures, data loss on the connector instance is expected (Netdata will also log it).
-
-- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
- By default this is `2 * update_every * 1000`.
-
-- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
- of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
- `localhost`), allowing us to filter which hosts will be sent to the external database when this Netdata is a central
- Netdata aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named
- `*db*` except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first
- pattern matching the hostname will be used - positive or negative).
-
-- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
- within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
- gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
- apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
- positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
- parameter has a higher priority than the configuration option.
-
-- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system
- and names are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several
- cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-- `send configured labels = yes | no` controls if host labels defined in the `[host labels]` section in `netdata.conf`
- should be sent to the external database
-
-- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`
- should be sent to the external database
-
-## HTTPS
-
-Netdata can send metrics to external databases using the TLS/SSL protocol. Unfortunately, some of
-them does not support encrypted connections, so you will have to configure a reverse proxy to enable
-HTTPS communication between Netdata and an external database. You can set up a reverse proxy with
-[Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md).
-
-## Exporting engine monitoring
-
-Netdata creates five charts in the dashboard, under the **Netdata Monitoring** section, to help you monitor the health
-and performance of the exporting engine itself:
-
-1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
- external database server.
-
-2. **Exporting data size**, the amount of data (in KB) Netdata added the buffer.
-
-3. **Exporting operations**, the number of operations performed by Netdata.
-
-4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread, that is responsible for sending
- the metrics to the external database server.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
-
-## Exporting engine alerts
-
-Netdata adds 3 alerts:
-
-1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
-2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
-3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server
-
-![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
-
-
diff --git a/exporting/aws_kinesis/Makefile.am b/exporting/aws_kinesis/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/exporting/aws_kinesis/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/exporting/aws_kinesis/metadata.yaml b/exporting/aws_kinesis/metadata.yaml
deleted file mode 100644
index 96e4044ec..000000000
--- a/exporting/aws_kinesis/metadata.yaml
+++ /dev/null
@@ -1,173 +0,0 @@
-# yamllint disable rule:line-length
----
-id: 'export-aws-kinesis'
-meta:
- name: 'AWS Kinesis'
- link: 'https://aws.amazon.com/kinesis/'
- categories:
- - export
- icon_filename: 'aws-kinesis.svg'
-keywords:
- - exporter
- - AWS
- - Kinesis
-overview:
- exporter_description: |
- Export metrics to AWS Kinesis Data Streams
- exporter_limitations: ''
-setup:
- prerequisites:
- list:
- - title: ''
- description: |
- - First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++
- - Here are the instructions when building from source, to ensure 3rd party dependencies are installed:
- ```bash
- git clone --recursive https://github.com/aws/aws-sdk-cpp.git
- cd aws-sdk-cpp/
- git submodule update --init --recursive
- mkdir BUILT
- cd BUILT
- cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..
- make
- make install
- ```
- - `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.
- - Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.
- configuration:
- file:
- name: 'exporting.conf'
- options:
- description: |
- Netdata automatically computes a partition key for every record with the purpose to distribute records across available shards evenly.
- The following options can be defined for this exporter.
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: 'enabled'
- default_value: 'no'
- description: 'Enables or disables an exporting connector instance (yes|no).'
- required: true
- - name: 'destination'
- default_value: 'no'
- description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
- required: true
- detailed_description: |
- The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
- - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
- - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.
- - PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.
-
- Example IPv4:
- ```yaml
- destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
- ```
- Example IPv6 and IPv4 together:
- ```yaml
- destination = [ffff:...:0001]:2003 10.11.12.1:2003
- ```
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
- - name: 'username'
- default_value: 'my_username'
- description: 'Username for HTTP authentication'
- required: false
- - name: 'password'
- default_value: 'my_password'
- description: 'Password for HTTP authentication'
- required: false
- - name: 'data source'
- default_value: ''
- description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
- required: false
- - name: 'hostname'
- default_value: '[global].hostname'
- description: 'The hostname to be used for sending data to the external database server.'
- required: false
- - name: 'prefix'
- default_value: 'Netdata'
- description: 'The prefix to add to all metrics.'
- required: false
- - name: 'update every'
- default_value: '10'
- description: |
- Frequency of sending sending data to the external database, in seconds.
- required: false
- detailed_description: |
- Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
- send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
- - name: 'buffer on failures'
- default_value: '10'
- description: |
- The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
- required: false
- detailed_description: |
- If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
- - name: 'timeout ms'
- default_value: '2 * update_every * 1000'
- description: 'The timeout in milliseconds to wait for the external database server to process the data.'
- required: false
- - name: 'send hosts matching'
- default_value: 'localhost *'
- description: |
- Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
- required: false
- detailed_description: |
- Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
- The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
- filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
- A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
- use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
- - name: 'send charts matching'
- default_value: '*'
- description: |
- One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
- required: false
- detailed_description: |
- A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
- use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
- positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
- has a higher priority than the configuration option.
- - name: 'send names instead of ids'
- default_value: ''
- description: 'Controls the metric names Netdata should send to the external database (yes|no).'
- required: false
- detailed_description: |
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
- are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
- different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
- - name: 'send configured labels'
- default_value: ''
- description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
- required: false
- - name: 'send automatic labels'
- default_value: ''
- description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list:
- - name: 'Example configuration'
- folding:
- enabled: false
- description: 'Basic configuration'
- config: |
- [kinesis:my_instance]
- enabled = yes
- destination = us-east-1
- - name: 'Configuration with AWS credentials'
- folding:
- enabled: false
- description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.'
- config: |
- [kinesis:my_instance]
- enabled = yes
- destination = us-east-1
- # AWS credentials
- aws_access_key_id = your_access_key_id
- aws_secret_access_key = your_secret_access_key
- # destination stream
- stream name = your_stream_name
diff --git a/exporting/graphite/Makefile.am b/exporting/graphite/Makefile.am
deleted file mode 100644
index babdcf0df..000000000
--- a/exporting/graphite/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/graphite/integrations/influxdb.md b/exporting/graphite/integrations/influxdb.md
deleted file mode 100644
index df401bf01..000000000
--- a/exporting/graphite/integrations/influxdb.md
+++ /dev/null
@@ -1,172 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/graphite/integrations/influxdb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/graphite/metadata.yaml"
-sidebar_label: "InfluxDB"
-learn_status: "Published"
-learn_rel_path: "Exporting"
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
-endmeta-->
-
-# InfluxDB
-
-
-<img src="https://netdata.cloud/img/influxdb.svg" width="150"/>
-
-
-Use the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,
-further analysis, or correlation with data from other sources.
-
-
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Setup
-
-### Prerequisites
-
-####
-
-- You have already installed Netdata and Graphite.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `exporting.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config exporting.conf
-```
-#### Options
-
-The following options can be defined for this exporter.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
-| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
-| username | Username for HTTP authentication | my_username | no |
-| password | Password for HTTP authentication | my_password | no |
-| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
-| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
-| prefix | The prefix to add to all metrics. | netdata | no |
-| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
-| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
-| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
-| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
-| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
-| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
-| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
-
-##### destination
-
-The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
-- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
-
-Example IPv4:
- ```yaml
- destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
- ```
-Example IPv6 and IPv4 together:
-```yaml
-destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
-
-##### update every
-
-Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-
-
-##### buffer on failures
-
-If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-
-
-##### send hosts matching
-
-Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-
-
-##### send charts matching
-
-A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-has a higher priority than the configuration option.
-
-
-##### send names instead of ids
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-
-</details>
-
-#### Examples
-
-##### Example configuration
-
-Basic configuration
-
-```yaml
-[graphite:netdata]
- enabled = yes
- destination = localhost:2003
-
-```
-##### Configuration with HTTPS and HTTP authentication
-
-Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
-
-```yaml
-[graphite:netdata]
- enabled = yes
- destination = localhost:2003
- username = my_username
- password = my_password
-
-```
-##### Detailed Configuration for a remote, secure host
-
-Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
-
-```yaml
-[graphite:https:netdata]
- enabled = yes
- username = my_username
- password = my_password
- destination = 10.10.1.114:2003
- # data source = average
- # prefix = netdata
- # hostname = my_hostname
- # update every = 10
- # buffer on failures = 10
- # timeout ms = 20000
- # send names instead of ids = yes
- # send charts matching = *
- # send hosts matching = localhost *
-
-```
-
diff --git a/exporting/graphite/metadata.yaml b/exporting/graphite/metadata.yaml
deleted file mode 100644
index ad8c7539b..000000000
--- a/exporting/graphite/metadata.yaml
+++ /dev/null
@@ -1,212 +0,0 @@
-# yamllint disable rule:line-length
----
-- &graphexport
- id: 'export-graphite'
- meta: &meta
- name: 'Graphite'
- link: 'https://graphite.readthedocs.io/en/latest/'
- categories:
- - export
- icon_filename: 'graphite.png'
- keywords:
- - exporter
- - graphite
- - remote write
- - time series
- overview:
- exporter_description: |
- Use the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,
- further analysis, or correlation with data from other sources.
- exporter_limitations: ''
- setup:
- prerequisites:
- list:
- - title: ''
- description: |
- - You have already installed Netdata and Graphite.
- configuration:
- file:
- name: 'exporting.conf'
- options:
- description: 'The following options can be defined for this exporter.'
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: 'enabled'
- default_value: 'no'
- description: 'Enables or disables an exporting connector instance (yes|no).'
- required: true
- - name: 'destination'
- default_value: 'no'
- description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
- required: true
- detailed_description: |
- The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
- - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-          - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
-          - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
-
- Example IPv4:
- ```yaml
- destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
- ```
- Example IPv6 and IPv4 together:
- ```yaml
- destination = [ffff:...:0001]:2003 10.11.12.1:2003
- ```
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
- - name: 'username'
- default_value: 'my_username'
- description: 'Username for HTTP authentication'
- required: false
- - name: 'password'
- default_value: 'my_password'
- description: 'Password for HTTP authentication'
- required: false
- - name: 'data source'
- default_value: ''
- description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
- required: false
- - name: 'hostname'
- default_value: '[global].hostname'
- description: 'The hostname to be used for sending data to the external database server.'
- required: false
- - name: 'prefix'
- default_value: 'netdata'
- description: 'The prefix to add to all metrics.'
- required: false
- - name: 'update every'
- default_value: '10'
- description: |
-          Frequency of sending data to the external database, in seconds.
- required: false
- detailed_description: |
- Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
- send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
- - name: 'buffer on failures'
- default_value: '10'
- description: |
- The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
- required: false
- detailed_description: |
- If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
- - name: 'timeout ms'
- default_value: '20000'
- description: 'The timeout in milliseconds to wait for the external database server to process the data.'
- required: false
- - name: 'send hosts matching'
- default_value: 'localhost *'
- description: |
- Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
- required: false
- detailed_description: |
- Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
- The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
- filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
- A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
- use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
- - name: 'send charts matching'
- default_value: '*'
- description: |
- One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
- required: false
- detailed_description: |
- A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
- use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
- positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
- has a higher priority than the configuration option.
- - name: 'send names instead of ids'
- default_value: ''
- description: 'Controls the metric names Netdata should send to the external database (yes|no).'
- required: false
- detailed_description: |
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
- are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
- different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
- - name: 'send configured labels'
- default_value: ''
- description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
- required: false
- - name: 'send automatic labels'
- default_value: ''
- description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list:
- - name: 'Example configuration'
- folding:
- enabled: false
- description: 'Basic configuration'
- config: |
- [graphite:netdata]
- enabled = yes
- destination = localhost:2003
- - name: 'Configuration with HTTPS and HTTP authentication'
- folding:
- enabled: false
- description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.'
- config: |
- [graphite:netdata]
- enabled = yes
- destination = localhost:2003
- username = my_username
- password = my_password
- - name: 'Detailed Configuration for a remote, secure host'
- folding:
- enabled: false
- description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.'
- config: |
- [graphite:https:netdata]
- enabled = yes
- username = my_username
- password = my_password
- destination = 10.10.1.114:2003
- # data source = average
- # prefix = netdata
- # hostname = my_hostname
- # update every = 10
- # buffer on failures = 10
- # timeout ms = 20000
- # send names instead of ids = yes
- # send charts matching = *
- # send hosts matching = localhost *
-- <<: *graphexport
- id: 'export-blueflood'
- meta:
- <<: *meta
- name: Blueflood
- link: http://blueflood.io/
- icon_filename: 'blueflood.png'
- keywords:
- - export
- - Blueflood
- - graphite
-- <<: *graphexport
- id: 'export-influxdb'
- meta:
- <<: *meta
- name: InfluxDB
- link: https://www.influxdata.com/
- icon_filename: 'influxdb.svg'
- keywords:
- - InfluxDB
- - Influx
- - export
- - graphite
-- <<: *graphexport
- id: 'export-kairosdb'
- meta:
- <<: *meta
- name: KairosDB
- link: https://kairosdb.github.io/
- icon_filename: 'kairos.png'
- keywords:
- - KairosDB
- - kairos
- - export
- - graphite
diff --git a/exporting/json/Makefile.am b/exporting/json/Makefile.am
deleted file mode 100644
index babdcf0df..000000000
--- a/exporting/json/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/json/json.c b/exporting/json/json.c
deleted file mode 100644
index d916fe774..000000000
--- a/exporting/json/json.c
+++ /dev/null
@@ -1,349 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "json.h"
-
-/**
- * Initialize JSON connector instance
- *
- * @param instance an instance data structure.
- * @return Returns 0 on success, 1 on failure.
- */
-int init_json_instance(struct instance *instance)
-{
- instance->worker = simple_connector_worker;
-
- struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
- instance->config.connector_specific_config = (void *)connector_specific_config;
- connector_specific_config->default_port = 5448;
-
- struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
- instance->connector_specific_data = connector_specific_data;
-
- instance->start_batch_formatting = NULL;
- instance->start_host_formatting = format_host_labels_json_plaintext;
- instance->start_chart_formatting = NULL;
-
- if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
- instance->metric_formatting = format_dimension_collected_json_plaintext;
- else
- instance->metric_formatting = format_dimension_stored_json_plaintext;
-
- instance->end_chart_formatting = NULL;
- instance->variables_formatting = NULL;
- instance->end_host_formatting = flush_host_labels;
- instance->end_batch_formatting = simple_connector_end_batch;
-
- instance->prepare_header = NULL;
-
- instance->check_response = exporting_discard_response;
-
- instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
- if (!instance->buffer) {
- netdata_log_error("EXPORTING: cannot create buffer for json exporting connector instance %s", instance->config.name);
- return 1;
- }
-
- simple_connector_init(instance);
-
- if (uv_mutex_init(&instance->mutex))
- return 1;
- if (uv_cond_init(&instance->cond_var))
- return 1;
-
- return 0;
-}
-
-/**
- * Initialize JSON connector instance for HTTP protocol
- *
- * @param instance an instance data structure.
- * @return Returns 0 on success, 1 on failure.
- */
-int init_json_http_instance(struct instance *instance)
-{
- instance->worker = simple_connector_worker;
-
- struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
- instance->config.connector_specific_config = (void *)connector_specific_config;
- connector_specific_config->default_port = 5448;
-
- struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
- instance->connector_specific_data = connector_specific_data;
-
-#ifdef ENABLE_HTTPS
- connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
- if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
- netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
- }
-#endif
-
- instance->start_batch_formatting = open_batch_json_http;
- instance->start_host_formatting = format_host_labels_json_plaintext;
- instance->start_chart_formatting = NULL;
-
- if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
- instance->metric_formatting = format_dimension_collected_json_plaintext;
- else
- instance->metric_formatting = format_dimension_stored_json_plaintext;
-
- instance->end_chart_formatting = NULL;
- instance->variables_formatting = NULL;
- instance->end_host_formatting = flush_host_labels;
- instance->end_batch_formatting = close_batch_json_http;
-
- instance->prepare_header = json_http_prepare_header;
-
- instance->check_response = exporting_discard_response;
-
- instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
-
- simple_connector_init(instance);
-
- if (uv_mutex_init(&instance->mutex))
- return 1;
- if (uv_cond_init(&instance->cond_var))
- return 1;
-
- return 0;
-}
-
-/**
- * Format host labels for JSON connector
- *
- * @param instance an instance data structure.
- * @param host a data collecting host.
- * @return Always returns 0.
- */
-
-int format_host_labels_json_plaintext(struct instance *instance, RRDHOST *host)
-{
- if (!instance->labels_buffer)
- instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
-
- if (unlikely(!sending_labels_configured(instance)))
- return 0;
-
- buffer_strcat(instance->labels_buffer, "\"labels\":{");
- rrdlabels_to_buffer(host->rrdlabels, instance->labels_buffer, "", ":", "\"", ",",
- exporting_labels_filter_callback, instance,
- NULL, sanitize_json_string);
- buffer_strcat(instance->labels_buffer, "},");
-
- return 0;
-}
-
-/**
- * Format dimension using collected data for JSON connector
- *
- * @param instance an instance data structure.
- * @param rd a dimension.
- * @return Always returns 0.
- */
-int format_dimension_collected_json_plaintext(struct instance *instance, RRDDIM *rd)
-{
- RRDSET *st = rd->rrdset;
- RRDHOST *host = st->rrdhost;
-
- const char *tags_pre = "", *tags_post = "", *tags = rrdhost_tags(host);
- if (!tags)
- tags = "";
-
- if (*tags) {
- if (*tags == '{' || *tags == '[' || *tags == '"') {
- tags_pre = "\"host_tags\":";
- tags_post = ",";
- } else {
- tags_pre = "\"host_tags\":\"";
- tags_post = "\",";
- }
- }
-
- if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
- if (buffer_strlen((BUFFER *)instance->buffer) > 2)
- buffer_strcat(instance->buffer, ",\n");
- }
-
- buffer_sprintf(
- instance->buffer,
-
- "{"
- "\"prefix\":\"%s\","
- "\"hostname\":\"%s\","
- "%s%s%s"
- "%s"
-
- "\"chart_id\":\"%s\","
- "\"chart_name\":\"%s\","
- "\"chart_family\":\"%s\","
- "\"chart_context\":\"%s\","
- "\"chart_type\":\"%s\","
- "\"units\":\"%s\","
-
- "\"id\":\"%s\","
- "\"name\":\"%s\","
- "\"value\":" COLLECTED_NUMBER_FORMAT ","
-
- "\"timestamp\":%llu}",
-
- instance->config.prefix,
- (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- tags_pre,
- tags,
- tags_post,
- instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "",
-
- rrdset_id(st),
- rrdset_name(st),
- rrdset_family(st),
- rrdset_context(st),
- rrdset_parts_type(st),
- rrdset_units(st),
- rrddim_id(rd),
- rrddim_name(rd),
- rd->collector.last_collected_value,
-
- (unsigned long long)rd->collector.last_collected_time.tv_sec);
-
- if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
- buffer_strcat(instance->buffer, "\n");
- }
-
- return 0;
-}
-
-/**
- * Format dimension using a calculated value from stored data for JSON connector
- *
- * @param instance an instance data structure.
- * @param rd a dimension.
- * @return Always returns 0.
- */
-int format_dimension_stored_json_plaintext(struct instance *instance, RRDDIM *rd)
-{
- RRDSET *st = rd->rrdset;
- RRDHOST *host = st->rrdhost;
-
- time_t last_t;
- NETDATA_DOUBLE value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
-
- if(isnan(value))
- return 0;
-
- const char *tags_pre = "", *tags_post = "", *tags = rrdhost_tags(host);
- if (!tags)
- tags = "";
-
- if (*tags) {
- if (*tags == '{' || *tags == '[' || *tags == '"') {
- tags_pre = "\"host_tags\":";
- tags_post = ",";
- } else {
- tags_pre = "\"host_tags\":\"";
- tags_post = "\",";
- }
- }
-
- if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
- if (buffer_strlen((BUFFER *)instance->buffer) > 2)
- buffer_strcat(instance->buffer, ",\n");
- }
-
- buffer_sprintf(
- instance->buffer,
- "{"
- "\"prefix\":\"%s\","
- "\"hostname\":\"%s\","
- "%s%s%s"
- "%s"
-
- "\"chart_id\":\"%s\","
- "\"chart_name\":\"%s\","
- "\"chart_family\":\"%s\","
- "\"chart_context\": \"%s\","
- "\"chart_type\":\"%s\","
- "\"units\": \"%s\","
-
- "\"id\":\"%s\","
- "\"name\":\"%s\","
- "\"value\":" NETDATA_DOUBLE_FORMAT ","
-
- "\"timestamp\": %llu}",
-
- instance->config.prefix,
- (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- tags_pre,
- tags,
- tags_post,
- instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "",
-
- rrdset_id(st),
- rrdset_name(st),
- rrdset_family(st),
- rrdset_context(st),
- rrdset_parts_type(st),
- rrdset_units(st),
- rrddim_id(rd),
- rrddim_name(rd),
- value,
-
- (unsigned long long)last_t);
-
- if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
- buffer_strcat(instance->buffer, "\n");
- }
-
- return 0;
-}
-
-/**
- * Open a JSON list for a batch
- *
- * @param instance an instance data structure.
- * @return Always returns 0.
- */
-int open_batch_json_http(struct instance *instance)
-{
- buffer_strcat(instance->buffer, "[\n");
-
- return 0;
-}
-
-/**
- * Close a JSON list for a batch and update buffered bytes counter
- *
- * @param instance an instance data structure.
- * @return Always returns 0.
- */
-int close_batch_json_http(struct instance *instance)
-{
- buffer_strcat(instance->buffer, "\n]\n");
-
- simple_connector_end_batch(instance);
-
- return 0;
-}
-
-/**
- * Prepare HTTP header
- *
- * @param instance an instance data structure.
- * This function does not return a value; the header is written into the connector's buffer.
- */
-void json_http_prepare_header(struct instance *instance)
-{
- struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
-
- buffer_sprintf(
- simple_connector_data->last_buffer->header,
- "POST /api/put HTTP/1.1\r\n"
- "Host: %s\r\n"
- "%s"
- "Content-Type: application/json\r\n"
- "Content-Length: %lu\r\n"
- "\r\n",
- instance->config.destination,
- simple_connector_data->auth_string ? simple_connector_data->auth_string : "",
- (unsigned long int) buffer_strlen(simple_connector_data->last_buffer->buffer));
-
- return;
-}
diff --git a/exporting/json/metadata.yaml b/exporting/json/metadata.yaml
deleted file mode 100644
index d9f93e4a1..000000000
--- a/exporting/json/metadata.yaml
+++ /dev/null
@@ -1,151 +0,0 @@
-# yamllint disable rule:line-length
----
-id: 'export-json'
-meta:
- name: 'JSON'
- link: 'https://learn.netdata.cloud/docs/exporting/json-document-databases'
- categories:
- - export
- icon_filename: 'json.svg'
-keywords:
- - exporter
- - json
-overview:
- exporter_description: |
- Use the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,
- further analysis, or correlation with data from other sources
- exporter_limitations: ''
-setup:
- prerequisites:
- list:
- - title: ''
- description: ''
- configuration:
- file:
- name: 'exporting.conf'
- options:
- description: |
- The following options can be defined for this exporter.
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: 'enabled'
- default_value: 'no'
- description: 'Enables or disables an exporting connector instance (yes|no).'
- required: true
- - name: 'destination'
-      default_value: 'localhost:5448'
- description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
- required: true
- detailed_description: |
- The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
- - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-        - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
-        - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
-
- Example IPv4:
- ```yaml
- destination = localhost:5448
- ```
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
- - name: 'username'
- default_value: 'my_username'
- description: 'Username for HTTP authentication'
- required: false
- - name: 'password'
- default_value: 'my_password'
- description: 'Password for HTTP authentication'
- required: false
- - name: 'data source'
- default_value: ''
- description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
- required: false
- - name: 'hostname'
- default_value: '[global].hostname'
- description: 'The hostname to be used for sending data to the external database server.'
- required: false
- - name: 'prefix'
- default_value: 'Netdata'
- description: 'The prefix to add to all metrics.'
- required: false
- - name: 'update every'
- default_value: '10'
- description: |
-        Frequency of sending data to the external database, in seconds.
- required: false
- detailed_description: |
- Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
- send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
- - name: 'buffer on failures'
- default_value: '10'
- description: |
- The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
- required: false
- detailed_description: |
- If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
- - name: 'timeout ms'
- default_value: '2 * update_every * 1000'
- description: 'The timeout in milliseconds to wait for the external database server to process the data.'
- required: false
- - name: 'send hosts matching'
- default_value: 'localhost *'
- description: |
- Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
- required: false
- detailed_description: |
- Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
- The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
- filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
- A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
- use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
- - name: 'send charts matching'
- default_value: '*'
- description: |
- One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
- required: false
- detailed_description: |
- A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
- use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
- positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
- has a higher priority than the configuration option.
- - name: 'send names instead of ids'
- default_value: ''
- description: 'Controls the metric names Netdata should send to the external database (yes|no).'
- required: false
- detailed_description: |
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
- are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
- different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
- - name: 'send configured labels'
- default_value: ''
- description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
- required: false
- - name: 'send automatic labels'
- default_value: ''
- description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list:
- - name: 'Basic configuration'
- folding:
- enabled: false
- description: ''
- config: |
- [json:my_json_instance]
- enabled = yes
- destination = localhost:5448
- - name: 'Configuration with HTTPS and HTTP authentication'
- folding:
- enabled: false
- description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `json:https:my_json_instance`.'
- config: |
- [json:my_json_instance]
- enabled = yes
- destination = localhost:5448
- username = my_username
- password = my_password
diff --git a/exporting/mongodb/Makefile.am b/exporting/mongodb/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/exporting/mongodb/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/exporting/mongodb/integrations/mongodb.md b/exporting/mongodb/integrations/mongodb.md
deleted file mode 100644
index 30dfe4f84..000000000
--- a/exporting/mongodb/integrations/mongodb.md
+++ /dev/null
@@ -1,145 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/mongodb/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/mongodb/metadata.yaml"
-sidebar_label: "MongoDB"
-learn_status: "Published"
-learn_rel_path: "Exporting"
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
-endmeta-->
-
-# MongoDB
-
-
-<img src="https://netdata.cloud/img/mongodb.svg" width="150"/>
-
-
-Use the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database
-for long-term storage, further analysis, or correlation with data from other sources.
-
-
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Setup
-
-### Prerequisites
-
-####
-
-- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.
-- Next, re-install Netdata from the source, which detects that the required library is now available.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `exporting.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config exporting.conf
-```
-#### Options
-
-The following options can be defined for this exporter.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
-| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |
-| username | Username for HTTP authentication | my_username | no |
-| password | Password for HTTP authentication | my_password | no |
-| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
-| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
-| prefix | The prefix to add to all metrics. | Netdata | no |
-| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
-| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
-| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
-| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
-| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
-| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
-| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
-
-##### destination
-
-The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.
-- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
-
-Example IPv4:
- ```yaml
- destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017
- ```
-Example IPv6 and IPv4 together:
-```yaml
-destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
-
-##### update every
-
-Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-
-
-##### buffer on failures
-
-If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-
-
-##### send hosts matching
-
-Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-
-
-##### send charts matching
-
-A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-has a higher priority than the configuration option.
-
-
-##### send names instead of ids
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-
-</details>
-
-#### Examples
-
-##### Basic configuration
-
-The default socket timeout depends on the exporting connector update interval.
-The timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the sockettimeoutms MongoDB URI option.
-
-
-```yaml
-[mongodb:my_instance]
- enabled = yes
- destination = mongodb://<hostname>
- database = your_database_name
- collection = your_collection_name
-
-```
-
diff --git a/exporting/mongodb/metadata.yaml b/exporting/mongodb/metadata.yaml
deleted file mode 100644
index 30e1e89d8..000000000
--- a/exporting/mongodb/metadata.yaml
+++ /dev/null
@@ -1,151 +0,0 @@
-# yamllint disable rule:line-length
----
-id: 'export-mongodb'
-meta:
- name: 'MongoDB'
- link: 'https://www.mongodb.com/'
- categories:
- - export
- icon_filename: 'mongodb.svg'
-keywords:
- - exporter
- - MongoDB
-overview:
- exporter_description: |
- Use the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database
- for long-term storage, further analysis, or correlation with data from other sources.
- exporter_limitations: ''
-setup:
- prerequisites:
- list:
- - title: ''
- description: |
- - To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.
- - Next, re-install Netdata from the source, which detects that the required library is now available.
- configuration:
- file:
- name: 'exporting.conf'
- options:
- description: |
- The following options can be defined for this exporter.
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: 'enabled'
- default_value: 'no'
- description: 'Enables or disables an exporting connector instance (yes|no).'
- required: true
- - name: 'destination'
- default_value: 'localhost'
- description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
- required: true
- detailed_description: |
- The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
- - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-        - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.
-        - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
-
- Example IPv4:
- ```yaml
- destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017
- ```
- Example IPv6 and IPv4 together:
- ```yaml
- destination = [ffff:...:0001]:2003 10.11.12.1:2003
- ```
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
- - name: 'username'
- default_value: 'my_username'
- description: 'Username for HTTP authentication'
- required: false
- - name: 'password'
- default_value: 'my_password'
- description: 'Password for HTTP authentication'
- required: false
- - name: 'data source'
- default_value: ''
- description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
- required: false
- - name: 'hostname'
- default_value: '[global].hostname'
- description: 'The hostname to be used for sending data to the external database server.'
- required: false
- - name: 'prefix'
- default_value: 'Netdata'
- description: 'The prefix to add to all metrics.'
- required: false
- - name: 'update every'
- default_value: '10'
- description: |
-        Frequency of sending data to the external database, in seconds.
- required: false
- detailed_description: |
- Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
- send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
- - name: 'buffer on failures'
- default_value: '10'
- description: |
- The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
- required: false
- detailed_description: |
- If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
- - name: 'timeout ms'
- default_value: '2 * update_every * 1000'
- description: 'The timeout in milliseconds to wait for the external database server to process the data.'
- required: false
- - name: 'send hosts matching'
- default_value: 'localhost *'
- description: |
- Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
- required: false
- detailed_description: |
- Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
- The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
- filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
- A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
- use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
- - name: 'send charts matching'
- default_value: '*'
- description: |
- One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
- required: false
- detailed_description: |
- A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
- use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
- positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
- has a higher priority than the configuration option.
- - name: 'send names instead of ids'
- default_value: ''
- description: 'Controls the metric names Netdata should send to the external database (yes|no).'
- required: false
- detailed_description: |
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
- are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
- different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
- - name: 'send configured labels'
- default_value: ''
- description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
- required: false
- - name: 'send automatic labels'
- default_value: ''
- description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list:
- - name: 'Basic configuration'
- folding:
- enabled: false
- description: |
- The default socket timeout depends on the exporting connector update interval.
- The timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the sockettimeoutms MongoDB URI option.
- config: |
- [mongodb:my_instance]
- enabled = yes
- destination = mongodb://<hostname>
- database = your_database_name
- collection = your_collection_name
diff --git a/exporting/opentsdb/Makefile.am b/exporting/opentsdb/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/exporting/opentsdb/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/exporting/opentsdb/metadata.yaml b/exporting/opentsdb/metadata.yaml
deleted file mode 100644
index 505c8c599..000000000
--- a/exporting/opentsdb/metadata.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-# yamllint disable rule:line-length
----
-id: 'export-opentsdb'
-meta:
- name: 'OpenTSDB'
- link: 'https://github.com/OpenTSDB/opentsdb'
- categories:
- - export
- icon_filename: 'opentsdb.png'
-keywords:
- - exporter
- - OpenTSDB
- - scalable time series
-overview:
- exporter_description: |
- Use the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,
- further analysis, or correlation with data from other sources.
- exporter_limitations: ''
-setup:
- prerequisites:
- list:
- - title: ''
- description: |
- - OpenTSDB and Netdata, installed, configured and operational.
- configuration:
- file:
- name: 'exporting.conf'
- options:
- description: 'The following options can be defined for this exporter.'
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: 'enabled'
- default_value: 'no'
- description: 'Enables or disables an exporting connector instance (yes|no).'
- required: true
- - name: 'destination'
- default_value: 'no'
- description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
- required: true
- detailed_description: |
- The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
- - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-        - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.
-        - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).
-
- Example IPv4:
- ```yaml
- destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
- ```
- Example IPv6 and IPv4 together:
- ```yaml
- destination = [ffff:...:0001]:2003 10.11.12.1:2003
- ```
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
- - name: 'username'
- default_value: 'my_username'
- description: 'Username for HTTP authentication'
- required: false
- - name: 'password'
- default_value: 'my_password'
- description: 'Password for HTTP authentication'
- required: false
- - name: 'data source'
- default_value: ''
- description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
- required: false
- - name: 'hostname'
- default_value: '[global].hostname'
- description: 'The hostname to be used for sending data to the external database server.'
- required: false
- - name: 'prefix'
- default_value: 'Netdata'
- description: 'The prefix to add to all metrics.'
- required: false
- - name: 'update every'
- default_value: '10'
- description: |
-        Frequency of sending data to the external database, in seconds.
- required: false
- detailed_description: |
- Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
- send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
- - name: 'buffer on failures'
- default_value: '10'
- description: |
- The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
- required: false
- detailed_description: |
- If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
- - name: 'timeout ms'
- default_value: '2 * update_every * 1000'
- description: 'The timeout in milliseconds to wait for the external database server to process the data.'
- required: false
- - name: 'send hosts matching'
- default_value: 'localhost *'
- description: |
- Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
- required: false
- detailed_description: |
- Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
- The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
- filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
- A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
- use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
- - name: 'send charts matching'
- default_value: '*'
- description: |
- One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
- required: false
- detailed_description: |
- A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
- use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
- positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
- has a higher priority than the configuration option.
- - name: 'send names instead of ids'
- default_value: ''
- description: 'Controls the metric names Netdata should send to the external database (yes|no).'
- required: false
- detailed_description: |
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
- are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
- different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
- - name: 'send configured labels'
- default_value: ''
- description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
- required: false
- - name: 'send automatic labels'
- default_value: ''
- description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list:
- - name: 'Minimal configuration'
- folding:
- enabled: false
- description: |
- Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol.
- For example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.
- config: |
- [opentsdb:my_opentsdb_instance]
- enabled = yes
- destination = localhost:4242
- - name: 'HTTP authentication'
- folding:
- enabled: false
- description: ''
- config: |
- [opentsdb:my_opentsdb_instance]
- enabled = yes
- destination = localhost:4242
- username = my_username
- password = my_password
- - name: 'Using `send hosts matching`'
- folding:
- enabled: false
- description: ''
- config: |
- [opentsdb:my_opentsdb_instance]
- enabled = yes
- destination = localhost:4242
- send hosts matching = localhost *
- - name: 'Using `send charts matching`'
- folding:
- enabled: false
- description: ''
- config: |
- [opentsdb:my_opentsdb_instance]
- enabled = yes
- destination = localhost:4242
- send charts matching = *
diff --git a/exporting/prometheus/Makefile.am b/exporting/prometheus/Makefile.am
deleted file mode 100644
index 334fca81c..000000000
--- a/exporting/prometheus/Makefile.am
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- remote_write \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/exporting/prometheus/README.md b/exporting/prometheus/README.md
deleted file mode 100644
index abd81554d..000000000
--- a/exporting/prometheus/README.md
+++ /dev/null
@@ -1,361 +0,0 @@
-# Using Netdata with Prometheus
-
-Netdata supports exporting metrics to Prometheus in two ways:
-
- - You can [configure Prometheus to scrape Netdata metrics](#configure-prometheus-to-scrape-netdata-metrics).
-
- - You can [configure Netdata to push metrics to Prometheus](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
- , using the Prometheus remote write API.
-
-## Netdata support for Prometheus
-
-Regardless of the methodology, you first need to understand how Netdata structures the metrics it exports to Prometheus
-and the capabilities it provides. The examples provided in this document assume that you will be using Netdata as
-a metrics endpoint, but the concepts apply as well to the remote write API method.
-
-### Understanding Netdata metrics
-
-#### Charts
-
-Each chart in Netdata has several properties (common to all its metrics):
-
-- `chart_id` - uniquely identifies a chart.
-
-- `chart_name` - a more human friendly name for `chart_id`, also unique.
-
-- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts
- have the same context, etc. This is used for alert templates to match all the charts they should be attached to.
-
-- `family` groups a set of charts together. It is used as the submenu of the dashboard.
-
-- `units` is the units for all the metrics attached to the chart.
-
-#### Dimensions
-
-Then each Netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of
-measurement, and are contextually in the same category (ie. the metrics for disk bandwidth are `read` and `write` and
-they are both in the same chart).
-
-### Netdata data source
-
-Netdata can send metrics to Prometheus from 3 data sources:
-
-- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is
- done by Netdata. The latest value for each metric is just given to Prometheus. This is the most preferred method by
- Prometheus, but it is also the harder to work with. To work with this data source, you will need to understand how
- to get meaningful values out of them.
-
- The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
-
- If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended the context.
-
- Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
- (`multiplier` and `divisor`). In this case, that the dimensions of a charts are heterogeneous, Netdata will use this
- format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
-
-- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on
- the Netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the Netdata
- dashboard charts. This is the easiest to work with.
-
- The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
-
- When this source is used, Netdata keeps track of the last access time for each Prometheus server fetching the
- metrics. This last access time is used at the subsequent queries of the same Prometheus server to identify the
- time-frame the `average` will be calculated.
-
- So, no matter how frequently Prometheus scrapes Netdata, it will get all the database data.
- To identify each Prometheus server, Netdata uses by default the IP of the client fetching the metrics.
-
- If there are multiple Prometheus servers fetching data from the same Netdata, using the same IP, each Prometheus
- server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the Prometheus server.
-
-- `sum` or `volume`, is like `average` but instead of averaging the values, it sums them.
-
- The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
- other operations are the same with `average`.
-
- To change the data source to `sum` or `as-collected` you need to provide the `source` parameter in the request URL.
- e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`
-
- Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
-
-### Querying Metrics
-
-Fetch with your web browser this URL:
-
-`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
-
-_(replace `your.netdata.ip` with the ip or hostname of your Netdata server)_
-
-Netdata will respond with all the metrics it sends to Prometheus.
-
-If you search that page for `"system.cpu"` you will find all the metrics Netdata is exporting to Prometheus for this
-chart. `system.cpu` is the chart name on the Netdata dashboard (on the Netdata dashboard all charts have a text heading
-such as : `Total CPU utilization (system.cpu)`. What we are interested here in the chart name: `system.cpu`).
-
-Searching for `"system.cpu"` reveals:
-
-```sh
-# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
-# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
-# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
-```
-
-_(Netdata response for `system.cpu` with source=`average`)_
-
-In `average` or `sum` data sources, all values are normalized and are reported to Prometheus as gauges. Now, use the
-'expression' text form in Prometheus. Begin to type the metrics we are looking for: `netdata_system_cpu`. You should see
-that the text form begins to auto-fill as Prometheus knows about this metric.
-
-If the data source was `as collected`, the response would be:
-
-```sh
-# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
-# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
-netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
-```
-
-_(Netdata response for `system.cpu` with source=`as-collected`)_
-
-For more information check Prometheus documentation.
-
-### Streaming data from upstream hosts
-
-The `format=prometheus` parameter only exports the host's Netdata metrics. If you are using the parent-child
-functionality of Netdata this ignores any upstream hosts - so you should consider using the below in your
-**prometheus.yml**:
-
-```yaml
- metrics_path: '/api/v1/allmetrics'
- params:
- format: [ prometheus_all_hosts ]
- honor_labels: true
-```
-
-This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names
-provided.
-
-### Timestamps
-
-To pass the metrics through Prometheus pushgateway, Netdata supports the option `&timestamps=no` to send the metrics
-without timestamps.
-
-## Netdata host variables
-
-Netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of
-files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to Prometheus by default.
-
-To expose them, append `variables=yes` to the Netdata URL.
-
-### TYPE and HELP
-
-To save bandwidth, and because Prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If
-wanted they can be re-enabled via `types=yes` and `help=yes`, e.g.
-`/api/v1/allmetrics?format=prometheus&types=yes&help=yes`
-
-Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against
-the Prometheus
-documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).
-
-### Names and IDs
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and
-names are human friendly labels (also unique).
-
-Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper,
-interrupts, QoS classes, statsd synthetic charts, etc.
-
-The default is controlled in `exporting.conf`:
-
-```conf
-[prometheus:exporter]
- send names instead of ids = yes | no
-```
-
-You can overwrite it from Prometheus, by appending to the URL:
-
-- `&names=no` to get IDs (the old behaviour)
-- `&names=yes` to get names
-
-### Filtering metrics sent to Prometheus
-
-Netdata can filter the metrics it sends to Prometheus with this setting:
-
-```conf
-[prometheus:exporter]
- send charts matching = *
-```
-
-This settings accepts a space separated list
-of [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to match the
-**charts** to be sent to Prometheus. Each pattern can use `*` as wildcard, any number of times (e.g `*a*b*c*` is valid).
-Patterns starting with `!` give a negative match (e.g `!*.bad users.* groups.*` will send all the users and groups
-except `bad` user and `bad` group). The order is important: the first match (positive or negative) left to right, is
-used.
-
-### Changing the prefix of Netdata metrics
-
-Netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
-
-```conf
-[prometheus:exporter]
- prefix = netdata
-```
-
-It can also be changed from the URL, by appending `&prefix=netdata`.
-
-### Metric Units
-
-The default source `average` adds the unit of measurement to the name of each metric (e.g. `_KiB_persec`). To hide the
-units and get the same metric names as with the other sources, append to the URL `&hideunits=yes`.
-
-The units were standardized in v1.12, with the effect of changing the metric names. To get the metric names as they were
-before v1.12, append to the URL `&oldunits=yes`
-
-### Accuracy of `average` and `sum` data sources
-
-When the data source is set to `average` or `sum`, Netdata remembers the last access of each client accessing Prometheus
-metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since
-that. This means that Prometheus servers are not losing data when they access Netdata with data source = `average` or
-`sum`.
-
-To uniquely identify each Prometheus server, Netdata uses the IP of the client accessing the metrics. If however the IP
-is not good enough for identifying a single Prometheus server (e.g. when Prometheus servers are accessing Netdata
-through a web proxy, or when multiple Prometheus servers are NATed to a single IP), each Prometheus may append
-`&server=NAME` to the URL. This `NAME` is used by Netdata to uniquely identify each Prometheus server and keep track of
-its last access time.
-
-## Configure Prometheus to scrape Netdata metrics
-
-The following `prometheus.yml` file will scrape all netdata metrics "as collected".
-
-Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
-
-```yaml
-# my global config
-global:
- scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
- evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
- # scrape_timeout is set to the global default (10s).
-
- # Attach these labels to any time series or alerts when communicating with
- # external systems (federation, remote storage, Alertmanager).
- external_labels:
- monitor: 'codelab-monitor'
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
-# - "first.rules"
-# - "second.rules"
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs:
- # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- - job_name: 'prometheus'
-
- # metrics_path defaults to '/metrics'
- # scheme defaults to 'http'.
-
- static_configs:
- - targets: [ '0.0.0.0:9090' ]
-
- - job_name: 'netdata-scrape'
-
- metrics_path: '/api/v1/allmetrics'
- params:
- # format: prometheus | prometheus_all_hosts
- # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
- format: [ prometheus ]
- #
- # sources: as-collected | raw | average | sum | volume
- # default is: average
- #source: [as-collected]
- #
- # server name for this prometheus - the default is the client IP
- # for Netdata to uniquely identify it
- #server: ['prometheus1']
- honor_labels: true
-
- static_configs:
- - targets: [ '{your.netdata.ip}:19999' ]
-```
-
-### Prometheus alerts for Netdata metrics
-
-The following is an example of a `nodes.yml` file that will allow Prometheus to generate alerts from some Netdata sources.
-Save it at `/opt/prometheus/nodes.yml`, and add a _- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above.
-
-```yaml
-groups:
- - name: nodes
-
- rules:
- - alert: node_high_cpu_usage_70
- expr: sum(sum_over_time(netdata_system_cpu_percentage_average{dimension=~"(user|system|softirq|irq|guest)"}[10m])) by (job) / sum(count_over_time(netdata_system_cpu_percentage_average{dimension="idle"}[10m])) by (job) > 70
- for: 1m
- annotations:
- description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
- summary: CPU alert for container node '{{ $labels.job }}'
-
- - alert: node_high_memory_usage_70
- expr: 100 / sum(netdata_system_ram_MB_average) by (job)
- * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
- for: 1m
- annotations:
- description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
- summary: Memory alert for container node '{{ $labels.job }}'
-
- - alert: node_low_root_filesystem_space_20
- expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
- * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
- for: 1m
- annotations:
- description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
- summary: Root filesystem alert for container node '{{ $labels.job }}'
-
- - alert: node_root_filesystem_fill_rate_6h
- expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
- for: 1h
- labels:
- severity: critical
- annotations:
- description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
- summary: Disk fill alert for Swarm node '{{ $labels.job }}'
-```
diff --git a/exporting/prometheus/integrations/elasticsearch.md b/exporting/prometheus/integrations/elasticsearch.md
deleted file mode 100644
index 94e8d9169..000000000
--- a/exporting/prometheus/integrations/elasticsearch.md
+++ /dev/null
@@ -1,158 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/elasticsearch.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
-sidebar_label: "ElasticSearch"
-learn_status: "Published"
-learn_rel_path: "Exporting"
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
-endmeta-->
-
-# ElasticSearch
-
-
-<img src="https://netdata.cloud/img/elasticsearch.svg" width="150"/>
-
-
-Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
-
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Limitations
-
-The remote write exporting connector does not support buffer on failures.
-
-
-## Setup
-
-### Prerequisites
-
-####
-
-- Netdata and the external storage provider of your choice, installed, configured and operational.
-- `protobuf` and `snappy` libraries installed.
-- Netdata reinstalled after the libraries.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `exporting.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config exporting.conf
-```
-#### Options
-
-The following options can be defined for this exporter.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
-| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
-| username | Username for HTTP authentication | my_username | no |
-| password | Password for HTTP authentication | my_password | no |
-| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
-| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
-| prefix | The prefix to add to all metrics. | netdata | no |
-| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
-| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
-| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
-| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
-| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
-| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
-| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
-
-##### destination
-
-The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.
-- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.
-
-Example IPv4:
- ```yaml
- destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
- ```
-Example IPv6 and IPv4 together:
-```yaml
-destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
-
-##### update every
-
-Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-
-
-##### buffer on failures
-
-If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-
-
-##### send hosts matching
-
-Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-
-
-##### send charts matching
-
-A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-has a higher priority than the configuration option.
-
-
-##### send names instead of ids
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-
-</details>
-
-#### Examples
-
-##### Example configuration
-
-Basic example configuration for Prometheus remote write.
-
-```yaml
-[prometheus_remote_write:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
-
-```
-##### Example configuration with HTTPS and HTTP authentication
-
-Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
-
-```yaml
-[prometheus_remote_write:https:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
- username = my_username
- password = my_password
-
-```
-
diff --git a/exporting/prometheus/integrations/kafka.md b/exporting/prometheus/integrations/kafka.md
deleted file mode 100644
index e052620c9..000000000
--- a/exporting/prometheus/integrations/kafka.md
+++ /dev/null
@@ -1,158 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/kafka.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
-sidebar_label: "Kafka"
-learn_status: "Published"
-learn_rel_path: "Exporting"
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
-endmeta-->
-
-# Kafka
-
-
-<img src="https://netdata.cloud/img/kafka.svg" width="150"/>
-
-
-Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
-
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Limitations
-
-The remote write exporting connector does not support buffer on failures.
-
-
-## Setup
-
-### Prerequisites
-
-####
-
-- Netdata and the external storage provider of your choice, installed, configured and operational.
-- `protobuf` and `snappy` libraries installed.
-- Netdata reinstalled after the libraries.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `exporting.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config exporting.conf
-```
-#### Options
-
-The following options can be defined for this exporter.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
-| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
-| username | Username for HTTP authentication | my_username | no |
-| password | Password for HTTP authentication | my_password | no |
-| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
-| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
-| prefix | The prefix to add to all metrics. | netdata | no |
-| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
-| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
-| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
-| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
-| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
-| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
-| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
-
-##### destination
-
-The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.
-- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.
-
-Example IPv4:
- ```yaml
- destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
- ```
-Example IPv6 and IPv4 together:
-```yaml
-destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
-
-##### update every
-
-Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-
-
-##### buffer on failures
-
-If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-
-
-##### send hosts matching
-
-Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-
-
-##### send charts matching
-
-A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-has a higher priority than the configuration option.
-
-
-##### send names instead of ids
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-
-</details>
-
-#### Examples
-
-##### Example configuration
-
-Basic example configuration for Prometheus remote write.
-
-```yaml
-[prometheus_remote_write:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
-
-```
-##### Example configuration with HTTPS and HTTP authentication
-
-Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
-
-```yaml
-[prometheus_remote_write:https:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
- username = my_username
- password = my_password
-
-```
-
diff --git a/exporting/prometheus/integrations/new_relic.md b/exporting/prometheus/integrations/new_relic.md
deleted file mode 100644
index f488b6203..000000000
--- a/exporting/prometheus/integrations/new_relic.md
+++ /dev/null
@@ -1,158 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/new_relic.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
-sidebar_label: "New Relic"
-learn_status: "Published"
-learn_rel_path: "Exporting"
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
-endmeta-->
-
-# New Relic
-
-
-<img src="https://netdata.cloud/img/newrelic.svg" width="150"/>
-
-
-Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
-
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Limitations
-
-The remote write exporting connector does not support buffer on failures.
-
-
-## Setup
-
-### Prerequisites
-
-####
-
-- Netdata and the external storage provider of your choice, installed, configured and operational.
-- `protobuf` and `snappy` libraries installed.
-- Netdata reinstalled after the libraries.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `exporting.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config exporting.conf
-```
-#### Options
-
-The following options can be defined for this exporter.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
-| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
-| username | Username for HTTP authentication | my_username | no |
-| password | Password for HTTP authentication | my_password | no |
-| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
-| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
-| prefix | The prefix to add to all metrics. | netdata | no |
-| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
-| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
-| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
-| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
-| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
-| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
-| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
-
-##### destination
-
-The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.
-- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.
-
-Example IPv4:
- ```yaml
- destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
- ```
-Example IPv6 and IPv4 together:
-```yaml
-destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
-
-##### update every
-
-Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-
-
-##### buffer on failures
-
-If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-
-
-##### send hosts matching
-
-Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-
-
-##### send charts matching
-
-A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-has a higher priority than the configuration option.
-
-
-##### send names instead of ids
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-
-</details>
-
-#### Examples
-
-##### Example configuration
-
-Basic example configuration for Prometheus remote write.
-
-```yaml
-[prometheus_remote_write:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
-
-```
-##### Example configuration with HTTPS and HTTP authentication
-
-Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
-
-```yaml
-[prometheus_remote_write:https:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
- username = my_username
- password = my_password
-
-```
-
diff --git a/exporting/prometheus/integrations/postgresql.md b/exporting/prometheus/integrations/postgresql.md
deleted file mode 100644
index a1b813398..000000000
--- a/exporting/prometheus/integrations/postgresql.md
+++ /dev/null
@@ -1,158 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/postgresql.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
-sidebar_label: "PostgreSQL"
-learn_status: "Published"
-learn_rel_path: "Exporting"
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
-endmeta-->
-
-# PostgreSQL
-
-
-<img src="https://netdata.cloud/img/postgres.svg" width="150"/>
-
-
-Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
-
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Limitations
-
-The remote write exporting connector does not support buffer on failures.
-
-
-## Setup
-
-### Prerequisites
-
-####
-
-- Netdata and the external storage provider of your choice, installed, configured and operational.
-- `protobuf` and `snappy` libraries installed.
-- Netdata reinstalled after the libraries.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `exporting.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config exporting.conf
-```
-#### Options
-
-The following options can be defined for this exporter.
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
-| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
-| username | Username for HTTP authentication | my_username | no |
-| password | Password for HTTP authentication | my_password | no |
-| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
-| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
-| prefix | The prefix to add to all metrics. | netdata | no |
-| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
-| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
-| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
-| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
-| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
-| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
-| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
-
-##### destination
-
-The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.
-- PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.
-
-Example IPv4:
- ```yaml
- destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
- ```
-Example IPv6 and IPv4 together:
-```yaml
-destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
-
-##### update every
-
-Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-
-
-##### buffer on failures
-
-If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-
-
-##### send hosts matching
-
-Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-
-
-##### send charts matching
-
-A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-has a higher priority than the configuration option.
-
-
-##### send names instead of ids
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-
-</details>
-
-#### Examples
-
-##### Example configuration
-
-Basic example configuration for Prometheus remote write.
-
-```yaml
-[prometheus_remote_write:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
-
-```
-##### Example configuration with HTTPS and HTTP authentication
-
-Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
-
-```yaml
-[prometheus_remote_write:https:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
- username = my_username
- password = my_password
-
-```
-
diff --git a/exporting/prometheus/metadata.yaml b/exporting/prometheus/metadata.yaml
deleted file mode 100644
index 906d8ea36..000000000
--- a/exporting/prometheus/metadata.yaml
+++ /dev/null
@@ -1,436 +0,0 @@
-# yamllint disable rule:line-length
----
-- &promexport
- id: 'export-prometheus-remote'
- meta: &meta
- name: 'Prometheus Remote Write'
- link: 'https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage'
- categories:
- - export
- icon_filename: 'prometheus.svg'
- keywords:
- - exporter
- - Prometheus
- - remote write
- - time series
- overview:
- exporter_description: |
- Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
- exporter_limitations: 'The remote write exporting connector does not support buffer on failures.'
- setup:
- prerequisites:
- list:
- - title: ''
- description: |
- - Netdata and the external storage provider of your choice, installed, configured and operational.
- - `protobuf` and `snappy` libraries installed.
- - Netdata reinstalled after the libraries.
- configuration:
- file:
- name: 'exporting.conf'
- options:
- description: 'The following options can be defined for this exporter.'
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: 'enabled'
- default_value: 'no'
- description: 'Enables or disables an exporting connector instance (yes|no).'
- required: true
- - name: 'destination'
- default_value: 'no'
- description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
- required: true
- detailed_description: |
- The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
- - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
- - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.
- - PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.
-
- Example IPv4:
- ```yaml
- destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
- ```
- Example IPv6 and IPv4 together:
- ```yaml
- destination = [ffff:...:0001]:2003 10.11.12.1:2003
- ```
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
- - name: 'username'
- default_value: 'my_username'
- description: 'Username for HTTP authentication'
- required: false
- - name: 'password'
- default_value: 'my_password'
- description: 'Password for HTTP authentication'
- required: false
- - name: 'data source'
- default_value: ''
- description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
- required: false
- - name: 'hostname'
- default_value: '[global].hostname'
- description: 'The hostname to be used for sending data to the external database server.'
- required: false
- - name: 'prefix'
- default_value: 'netdata'
- description: 'The prefix to add to all metrics.'
- required: false
- - name: 'update every'
- default_value: '10'
- description: |
- Frequency of sending sending data to the external database, in seconds.
- required: false
- detailed_description: |
- Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
- send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
- - name: 'buffer on failures'
- default_value: '10'
- description: |
- The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
- required: false
- detailed_description: |
- If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
- - name: 'timeout ms'
- default_value: '20000'
- description: 'The timeout in milliseconds to wait for the external database server to process the data.'
- required: false
- - name: 'send hosts matching'
- default_value: 'localhost *'
- description: |
- Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
- required: false
- detailed_description: |
- Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
- The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
- filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
- A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
- use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
- - name: 'send charts matching'
- default_value: '*'
- description: |
- One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
- required: false
- detailed_description: |
- A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
- use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
- positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
- has a higher priority than the configuration option.
- - name: 'send names instead of ids'
- default_value: ''
- description: 'Controls the metric names Netdata should send to the external database (yes|no).'
- required: false
- detailed_description: |
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
- are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
- different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
- - name: 'send configured labels'
- default_value: ''
- description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
- required: false
- - name: 'send automatic labels'
- default_value: ''
- description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list:
- - name: 'Example configuration'
- folding:
- enabled: false
- description: 'Basic example configuration for Prometheus remote write.'
- config: |
- [prometheus_remote_write:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
- - name: 'Example configuration with HTTPS and HTTP authentication'
- folding:
- enabled: false
- description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.'
- config: |
- [prometheus_remote_write:https:my_instance]
- enabled = yes
- destination = 10.11.14.2:2003
- remote write URL path = /receive
- username = my_username
- password = my_password
-- <<: *promexport
- id: 'export-appoptics'
- meta:
- <<: *meta
- name: AppOptics
- link: https://www.solarwinds.com/appoptics
- icon_filename: 'solarwinds.svg'
- keywords:
- - app optics
- - AppOptics
- - Solarwinds
-- <<: *promexport
- id: 'export-azure-data'
- meta:
- <<: *meta
- name: Azure Data Explorer
- link: https://azure.microsoft.com/en-us/pricing/details/data-explorer/
- icon_filename: 'azuredataex.jpg'
- keywords:
- - Azure Data Explorer
- - Azure
-- <<: *promexport
- id: 'export-azure-event'
- meta:
- <<: *meta
- name: Azure Event Hub
- link: https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about
- icon_filename: 'azureeventhub.png'
- keywords:
- - Azure Event Hub
- - Azure
-- <<: *promexport
- id: 'export-newrelic'
- meta:
- <<: *meta
- name: New Relic
- link: https://newrelic.com/
- icon_filename: 'newrelic.svg'
- keywords:
- - export
- - NewRelic
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-quasar'
- meta:
- <<: *meta
- name: QuasarDB
- link: https://doc.quasar.ai/master/
- icon_filename: 'quasar.jpeg'
- keywords:
- - export
- - quasar
- - quasarDB
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-splunk'
- meta:
- <<: *meta
- name: Splunk SignalFx
- link: https://www.splunk.com/en_us/products/observability.html
- icon_filename: 'splunk.svg'
- keywords:
- - export
- - splunk
- - signalfx
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-tikv'
- meta:
- <<: *meta
- name: TiKV
- link: https://tikv.org/
- icon_filename: 'tikv.png'
- keywords:
- - export
- - TiKV
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-thanos'
- meta:
- <<: *meta
- name: Thanos
- link: https://thanos.io/
- icon_filename: 'thanos.png'
- keywords:
- - export
- - thanos
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-victoria'
- meta:
- <<: *meta
- name: VictoriaMetrics
- link: https://victoriametrics.com/products/open-source/
- icon_filename: 'victoriametrics.png'
- keywords:
- - export
- - victoriametrics
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-vmware'
- meta:
- <<: *meta
- name: VMware Aria
- link: https://www.vmware.com/products/aria-operations-for-applications.html
- icon_filename: 'aria.png'
- keywords:
- - export
- - VMware
- - Aria
- - Tanzu
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-chronix'
- meta:
- <<: *meta
- name: Chronix
- link: https://dbdb.io/db/chronix
- icon_filename: 'chronix.png'
- keywords:
- - export
- - chronix
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-cortex'
- meta:
- <<: *meta
- name: Cortex
- link: https://cortexmetrics.io/
- icon_filename: 'cortex.png'
- keywords:
- - export
- - cortex
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-crate'
- meta:
- <<: *meta
- name: CrateDB
- link: https://crate.io/
- icon_filename: 'crate.svg'
- keywords:
- - export
- - CrateDB
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-elastic'
- meta:
- <<: *meta
- name: ElasticSearch
- link: https://www.elastic.co/
- icon_filename: 'elasticsearch.svg'
- keywords:
- - export
- - ElasticSearch
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-gnocchi'
- meta:
- <<: *meta
- name: Gnocchi
- link: https://wiki.openstack.org/wiki/Gnocchi
- icon_filename: 'gnocchi.svg'
- keywords:
- - export
- - Gnocchi
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-bigquery'
- meta:
- <<: *meta
- name: Google BigQuery
- link: https://cloud.google.com/bigquery/
- icon_filename: 'bigquery.png'
- keywords:
- - export
- - Google BigQuery
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-irondb'
- meta:
- <<: *meta
- name: IRONdb
- link: https://docs.circonus.com/irondb/
- icon_filename: 'irondb.png'
- keywords:
- - export
- - IRONdb
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-kafka'
- meta:
- <<: *meta
- name: Kafka
- link: https://kafka.apache.org/
- icon_filename: 'kafka.svg'
- keywords:
- - export
- - Kafka
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-m3db'
- meta:
- <<: *meta
- name: M3DB
- link: https://m3db.io/
- icon_filename: 'm3db.png'
- keywords:
- - export
- - M3DB
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-metricfire'
- meta:
- <<: *meta
- name: MetricFire
- link: https://www.metricfire.com/
- icon_filename: 'metricfire.png'
- keywords:
- - export
- - MetricFire
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-pgsql'
- meta:
- <<: *meta
- name: PostgreSQL
- link: https://www.postgresql.org/
- icon_filename: 'postgres.svg'
- keywords:
- - export
- - PostgreSQL
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-wavefront'
- meta:
- <<: *meta
- name: Wavefront
- link: https://docs.wavefront.com/wavefront_data_ingestion.html
- icon_filename: 'wavefront.png'
- keywords:
- - export
- - Wavefront
- - prometheus
- - remote write
-- <<: *promexport
- id: 'export-timescaledb'
- meta:
- <<: *meta
- name: TimescaleDB
- link: https://www.timescale.com/
- icon_filename: 'timescale.png'
- keywords:
- - export
- - TimescaleDB
- - prometheus
- - remote write
diff --git a/exporting/prometheus/remote_write/Makefile.am b/exporting/prometheus/remote_write/Makefile.am
deleted file mode 100644
index d049ef48c..000000000
--- a/exporting/prometheus/remote_write/Makefile.am
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-CLEANFILES = \
- remote_write.pb.cc \
- remote_write.pb.h \
- $(NULL)
-
-dist_noinst_DATA = \
- remote_write.proto \
- README.md \
- $(NULL)
diff --git a/exporting/pubsub/Makefile.am b/exporting/pubsub/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/exporting/pubsub/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/exporting/pubsub/metadata.yaml b/exporting/pubsub/metadata.yaml
deleted file mode 100644
index 7f57bb809..000000000
--- a/exporting/pubsub/metadata.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-# yamllint disable rule:line-length
----
-id: 'export-google-pubsub'
-meta:
- name: 'Google Cloud Pub Sub'
- link: 'https://cloud.google.com/pubsub'
- categories:
- - export
- icon_filename: 'pubsub.png'
-keywords:
- - exporter
- - Google Cloud
- - Pub Sub
-overview:
- exporter_description: |
- Export metrics to Google Cloud Pub/Sub Service
- exporter_limitations: ''
-setup:
- prerequisites:
- list:
- - title: ''
- description: |
- - First [install](https://github.com/googleapis/google-cloud-cpp/) install Google Cloud Platform C++ Client Libraries
- - Pub/Sub support is also dependent on the dependencies of those libraries, like `protobuf`, `protoc`, and `grpc`
- - Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.
- configuration:
- file:
- name: 'exporting.conf'
- options:
- description: |
- The following options can be defined for this exporter.
- folding:
- title: 'Config options'
- enabled: true
- list:
- - name: 'enabled'
- default_value: 'no'
- description: 'Enables or disables an exporting connector instance (yes|no).'
- required: true
- - name: 'destination'
- default_value: 'pubsub.googleapis.com'
- description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
- required: true
- detailed_description: |
- The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
- - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
- - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can to enclose the IP in [] to separate it from the port.
- - PORT can be a number of a service name. If omitted, the default port for the exporting connector will be used.
-
- Example IPv4:
- ```yaml
- destination = pubsub.googleapis.com
- ```
- When multiple servers are defined, Netdata will try the next one when the previous one fails.
- - name: 'username'
- default_value: 'my_username'
- description: 'Username for HTTP authentication'
- required: false
- - name: 'password'
- default_value: 'my_password'
- description: 'Password for HTTP authentication'
- required: false
- - name: 'data source'
- default_value: ''
- description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
- required: false
- - name: 'hostname'
- default_value: '[global].hostname'
- description: 'The hostname to be used for sending data to the external database server.'
- required: false
- - name: 'prefix'
- default_value: 'Netdata'
- description: 'The prefix to add to all metrics.'
- required: false
- - name: 'update every'
- default_value: '10'
- description: |
- Frequency of sending sending data to the external database, in seconds.
- required: false
- detailed_description: |
- Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
- send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
- - name: 'buffer on failures'
- default_value: '10'
- description: |
- The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
- required: false
- detailed_description: |
- If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
- - name: 'timeout ms'
- default_value: '2 * update_every * 1000'
- description: 'The timeout in milliseconds to wait for the external database server to process the data.'
- required: false
- - name: 'send hosts matching'
- default_value: 'localhost *'
- description: |
- Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
- required: false
- detailed_description: |
- Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
- The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
- filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
- A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
- use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
- - name: 'send charts matching'
- default_value: '*'
- description: |
- One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
- required: false
- detailed_description: |
- A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
- use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
- positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
- has a higher priority than the configuration option.
- - name: 'send names instead of ids'
- default_value: ''
- description: 'Controls the metric names Netdata should send to the external database (yes|no).'
- required: false
- detailed_description: |
- Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
- are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
- different : disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
- - name: 'send configured labels'
- default_value: ''
- description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
- required: false
- - name: 'send automatic labels'
- default_value: ''
- description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
- required: false
- examples:
- folding:
- enabled: true
- title: ''
- list:
- - name: 'Basic configuration'
- folding:
- enabled: false
- description: |
- - Set the destination option to a Pub/Sub service endpoint. pubsub.googleapis.com is the default one.
- - Create the credentials JSON file by following Google Cloud's authentication guide.
- - The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can set
- `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`
- - Set the credentials file option to the full path of the file.
- config: |
- [pubsub:my_instance]
- enabled = yes
- destination = pubsub.googleapis.com
- credentials file = /etc/netdata/google_cloud_credentials.json
- project id = my_project
- topic id = my_topic
diff --git a/exporting/tests/Makefile.am b/exporting/tests/Makefile.am
deleted file mode 100644
index babdcf0df..000000000
--- a/exporting/tests/Makefile.am
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/tests/exporting_doubles.c b/exporting/tests/exporting_doubles.c
deleted file mode 100644
index 75ab7ba43..000000000
--- a/exporting/tests/exporting_doubles.c
+++ /dev/null
@@ -1,405 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "test_exporting_engine.h"
-
-struct engine *__real_read_exporting_config();
-struct engine *__wrap_read_exporting_config()
-{
- function_called();
- return mock_ptr_type(struct engine *);
-}
-
-struct engine *__mock_read_exporting_config()
-{
- struct engine *engine = calloc(1, sizeof(struct engine));
- engine->config.hostname = strdupz("test_engine_host");
- engine->config.update_every = 3;
-
-
- engine->instance_root = calloc(1, sizeof(struct instance));
- struct instance *instance = engine->instance_root;
- instance->engine = engine;
- instance->config.type = EXPORTING_CONNECTOR_TYPE_GRAPHITE;
- instance->config.name = strdupz("instance_name");
- instance->config.destination = strdupz("localhost");
- instance->config.username = strdupz("");
- instance->config.password = strdupz("");
- instance->config.prefix = strdupz("netdata");
- instance->config.hostname = strdupz("test-host");
- instance->config.update_every = 1;
- instance->config.buffer_on_failures = 10;
- instance->config.timeoutms = 10000;
- instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
- instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
-
- return engine;
-}
-
-int __real_init_connectors(struct engine *engine);
-int __wrap_init_connectors(struct engine *engine)
-{
- function_called();
- check_expected_ptr(engine);
- return mock_type(int);
-}
-
-int __real_mark_scheduled_instances(struct engine *engine);
-int __wrap_mark_scheduled_instances(struct engine *engine)
-{
- function_called();
- check_expected_ptr(engine);
- return mock_type(int);
-}
-
-NETDATA_DOUBLE __real_exporting_calculate_value_from_stored_data(
- struct instance *instance,
- RRDDIM *rd,
- time_t *last_timestamp);
-NETDATA_DOUBLE __wrap_exporting_calculate_value_from_stored_data(
- struct instance *instance,
- RRDDIM *rd,
- time_t *last_timestamp)
-{
- (void)instance;
- (void)rd;
-
- *last_timestamp = 15052;
-
- function_called();
- return mock_type(NETDATA_DOUBLE);
-}
-
-int __real_prepare_buffers(struct engine *engine);
-int __wrap_prepare_buffers(struct engine *engine)
-{
- function_called();
- check_expected_ptr(engine);
- return mock_type(int);
-}
-
-void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
-{
- function_called();
- check_expected_ptr(st_rusage);
- check_expected_ptr(rd_user);
- check_expected_ptr(rd_system);
-}
-
-void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
-{
- function_called();
- check_expected_ptr(st_rusage);
- check_expected_ptr(rd_user);
- check_expected_ptr(rd_system);
-}
-
-int __wrap_send_internal_metrics(struct instance *instance)
-{
- function_called();
- check_expected_ptr(instance);
- return mock_type(int);
-}
-
-int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(host);
- return mock_type(int);
-}
-
-int __wrap_rrdset_is_exportable(struct instance *instance, RRDSET *st)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(st);
- return mock_type(int);
-}
-
-int __mock_start_batch_formatting(struct instance *instance)
-{
- function_called();
- check_expected_ptr(instance);
- return mock_type(int);
-}
-
-int __mock_start_host_formatting(struct instance *instance, RRDHOST *host)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(host);
- return mock_type(int);
-}
-
-int __mock_start_chart_formatting(struct instance *instance, RRDSET *st)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(st);
- return mock_type(int);
-}
-
-int __mock_metric_formatting(struct instance *instance, RRDDIM *rd)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(rd);
- return mock_type(int);
-}
-
-int __mock_end_chart_formatting(struct instance *instance, RRDSET *st)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(st);
- return mock_type(int);
-}
-
-int __mock_variables_formatting(struct instance *instance, RRDHOST *host)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(host);
- return mock_type(int);
-}
-
-int __mock_end_host_formatting(struct instance *instance, RRDHOST *host)
-{
- function_called();
- check_expected_ptr(instance);
- check_expected_ptr(host);
- return mock_type(int);
-}
-
-int __mock_end_batch_formatting(struct instance *instance)
-{
- function_called();
- check_expected_ptr(instance);
- return mock_type(int);
-}
-
-int __wrap_simple_connector_end_batch(struct instance *instance)
-{
- function_called();
- check_expected_ptr(instance);
- return mock_type(int);
-}
-
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
-void *__wrap_init_write_request()
-{
- function_called();
- return mock_ptr_type(void *);
-}
-
-void __wrap_add_host_info(
- void *write_request_p,
- const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp)
-{
- function_called();
- check_expected_ptr(write_request_p);
- check_expected_ptr(name);
- check_expected_ptr(instance);
- check_expected_ptr(application);
- check_expected_ptr(version);
- check_expected(timestamp);
-}
-
-void __wrap_add_label(void *write_request_p, char *key, char *value)
-{
- function_called();
- check_expected_ptr(write_request_p);
- check_expected_ptr(key);
- check_expected_ptr(value);
-}
-
-void __wrap_add_metric(
- void *write_request_p,
- const char *name, const char *chart, const char *family, const char *dimension,
- const char *instance, const double value, const int64_t timestamp)
-{
- function_called();
- check_expected_ptr(write_request_p);
- check_expected_ptr(name);
- check_expected_ptr(chart);
- check_expected_ptr(family);
- check_expected_ptr(dimension);
- check_expected_ptr(instance);
- check_expected(value);
- check_expected(timestamp);
-}
-#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
-
-#if HAVE_KINESIS
-void __wrap_aws_sdk_init()
-{
- function_called();
-}
-
-void __wrap_kinesis_init(
- void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
- const long timeout)
-{
- function_called();
- check_expected_ptr(kinesis_specific_data_p);
- check_expected_ptr(region);
- check_expected_ptr(access_key_id);
- check_expected_ptr(secret_key);
- check_expected(timeout);
-}
-
-void __wrap_kinesis_put_record(
- void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
- size_t data_len)
-{
- function_called();
- check_expected_ptr(kinesis_specific_data_p);
- check_expected_ptr(stream_name);
- check_expected_ptr(partition_key);
- check_expected_ptr(data);
- check_expected_ptr(data);
- check_expected(data_len);
-}
-
-int __wrap_kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes)
-{
- function_called();
- check_expected_ptr(request_outcomes_p);
- check_expected_ptr(error_message);
- check_expected_ptr(sent_bytes);
- check_expected_ptr(lost_bytes);
- return mock_type(int);
-}
-#endif // HAVE_KINESIS
-
-#if ENABLE_EXPORTING_PUBSUB
-int __wrap_pubsub_init(
- void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
- const char *project_id, const char *topic_id)
-{
- function_called();
- check_expected_ptr(pubsub_specific_data_p);
- check_expected_ptr(error_message);
- check_expected_ptr(destination);
- check_expected_ptr(credentials_file);
- check_expected_ptr(project_id);
- check_expected_ptr(topic_id);
- return mock_type(int);
-}
-
-int __wrap_pubsub_add_message(void *pubsub_specific_data_p, char *data)
-{
- function_called();
- check_expected_ptr(pubsub_specific_data_p);
- check_expected_ptr(data);
- return mock_type(int);
-}
-
-int __wrap_pubsub_publish(
- void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes)
-{
- function_called();
- check_expected_ptr(pubsub_specific_data_p);
- check_expected_ptr(error_message);
- check_expected(buffered_metrics);
- check_expected(buffered_bytes);
- return mock_type(int);
-}
-
-int __wrap_pubsub_get_result(
- void *pubsub_specific_data_p, char *error_message,
- size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes)
-{
- function_called();
- check_expected_ptr(pubsub_specific_data_p);
- check_expected_ptr(error_message);
- check_expected_ptr(sent_metrics);
- check_expected_ptr(sent_bytes);
- check_expected_ptr(lost_metrics);
- check_expected_ptr(lost_bytes);
- return mock_type(int);
-}
-#endif // ENABLE_EXPORTING_PUBSUB
-
-#if HAVE_MONGOC
-void __wrap_mongoc_init()
-{
- function_called();
-}
-
-mongoc_uri_t * __wrap_mongoc_uri_new_with_error (const char *uri_string, bson_error_t *error)
-{
- function_called();
- check_expected_ptr(uri_string);
- check_expected_ptr(error);
- return mock_ptr_type(mongoc_uri_t *);
-}
-
-int32_t __wrap_mongoc_uri_get_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t fallback)
-{
- function_called();
- check_expected_ptr(uri);
- check_expected_ptr(option);
- check_expected(fallback);
- return mock_type(int32_t);
-}
-
-bool __wrap_mongoc_uri_set_option_as_int32 (const mongoc_uri_t *uri, const char *option, int32_t value)
-{
- function_called();
- check_expected_ptr(uri);
- check_expected_ptr(option);
- check_expected(value);
- return mock_type(bool);
-}
-
-mongoc_client_t * __wrap_mongoc_client_new_from_uri (const mongoc_uri_t *uri)
-{
- function_called();
- check_expected_ptr(uri);
- return mock_ptr_type(mongoc_client_t *);
-}
-
-bool __wrap_mongoc_client_set_appname (mongoc_client_t *client, const char *appname)
-{
- function_called();
- check_expected_ptr(client);
- check_expected_ptr(appname);
- return mock_type(bool);
-}
-
-mongoc_collection_t *
-__wrap_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection)
-{
- function_called();
- check_expected_ptr(client);
- check_expected_ptr(db);
- check_expected_ptr(collection);
- return mock_ptr_type(mongoc_collection_t *);
-}
-
-void __wrap_mongoc_uri_destroy (mongoc_uri_t *uri)
-{
- function_called();
- check_expected_ptr(uri);
-}
-
-bool __wrap_mongoc_collection_insert_many(
- mongoc_collection_t *collection,
- const bson_t **documents,
- size_t n_documents,
- const bson_t *opts,
- bson_t *reply,
- bson_error_t *error)
-{
- function_called();
- check_expected_ptr(collection);
- check_expected_ptr(documents);
- check_expected(n_documents);
- check_expected_ptr(opts);
- check_expected_ptr(reply);
- check_expected_ptr(error);
- return mock_type(bool);
-}
-#endif // HAVE_MONGOC
diff --git a/exporting/tests/exporting_fixtures.c b/exporting/tests/exporting_fixtures.c
deleted file mode 100644
index 78159a82d..000000000
--- a/exporting/tests/exporting_fixtures.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "test_exporting_engine.h"
-
-int setup_configured_engine(void **state)
-{
- struct engine *engine = __mock_read_exporting_config();
- engine->instance_root->data_is_ready = 1;
-
- *state = engine;
-
- return 0;
-}
-
-int teardown_configured_engine(void **state)
-{
- struct engine *engine = *state;
-
- struct instance *instance = engine->instance_root;
- free((void *)instance->config.destination);
- free((void *)instance->config.username);
- free((void *)instance->config.password);
- free((void *)instance->config.name);
- free((void *)instance->config.prefix);
- free((void *)instance->config.hostname);
- simple_pattern_free(instance->config.charts_pattern);
- simple_pattern_free(instance->config.hosts_pattern);
- free(instance);
-
- free((void *)engine->config.hostname);
- free(engine);
-
- return 0;
-}
-
-static void rrddim_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *rrddim, void *st) {
- RRDDIM *rd = rrddim;
-
- rd->id = string_strdupz("dimension_id");
- rd->name = string_strdupz("dimension_name");
-
- rd->rrdset = (RRDSET *)st;
- rd->last_collected_value = 123000321;
- rd->last_collected_time.tv_sec = 15051;
- rd->collections_counter++;
- rd->next = NULL;
-
- rd->tiers[0] = calloc(1, sizeof(struct rrddim_tier));
- rd->tiers[0]->query_ops.oldest_time = __mock_rrddim_query_oldest_time;
- rd->tiers[0]->query_ops.latest_time = __mock_rrddim_query_latest_time;
- rd->tiers[0]->query_ops.init = __mock_rrddim_query_init;
- rd->tiers[0]->query_ops.is_finished = __mock_rrddim_query_is_finished;
- rd->tiers[0]->query_ops.next_metric = __mock_rrddim_query_next_metric;
- rd->tiers[0]->query_ops.finalize = __mock_rrddim_query_finalize;
-}
-
-static void rrdset_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *rrdset, void *constructor_data __maybe_unused) {
- RRDHOST *host = localhost;
- RRDSET *st = rrdset;
-
- // const char *chart_full_id = dictionary_acquired_item_name(item);
-
- st->id = string_strdupz("chart_id");
- st->name = string_strdupz("chart_name");
-
- st->update_every = 1;
- st->rrd_memory_mode = RRD_MEMORY_MODE_SAVE;
-
- st->rrdhost = host;
-
- st->rrddim_root_index = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE);
-
- dictionary_register_insert_callback(st->rrddim_root_index, rrddim_insert_callback, NULL);
-}
-
-int setup_rrdhost()
-{
- localhost = calloc(1, sizeof(RRDHOST));
-
- localhost->rrd_update_every = 1;
-
- localhost->tags = string_strdupz("TAG1=VALUE1 TAG2=VALUE2");
-
- localhost->rrdlabels = rrdlabels_create();
- rrdlabels_add(localhost->rrdlabels, "key1", "value1", RRDLABEL_SRC_CONFIG);
- rrdlabels_add(localhost->rrdlabels, "key2", "value2", RRDLABEL_SRC_CONFIG);
-
- localhost->rrdset_root_index = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE);
- dictionary_register_insert_callback(localhost->rrdset_root_index, rrdset_insert_callback, NULL);
- RRDSET *st = dictionary_set_advanced(localhost->rrdset_root_index, "chart_id", -1, NULL, sizeof(RRDSET), NULL);
-
- st->rrddim_root_index = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE);
- dictionary_register_insert_callback(st->rrddim_root_index, rrddim_insert_callback, NULL);
- st->dimensions = dictionary_set_advanced(st->rrddim_root_index, "dimension_id", -1, NULL, rrddim_size(), st);
-
- return 0;
-}
-
-int teardown_rrdhost()
-{
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
-
- string_freez(rd->id);
- string_freez(rd->name);
- free(rd->tiers[0]);
-
- string_freez(st->id);
- string_freez(st->name);
- dictionary_destroy(st->rrddim_root_index);
-
- rrdlabels_destroy(localhost->rrdlabels);
-
- string_freez(localhost->tags);
- dictionary_destroy(localhost->rrdset_root_index);
- free(localhost);
-
- return 0;
-}
-
-int setup_initialized_engine(void **state)
-{
- setup_configured_engine(state);
-
- struct engine *engine = *state;
- init_connectors_in_tests(engine);
-
- setup_rrdhost();
-
- return 0;
-}
-
-int teardown_initialized_engine(void **state)
-{
- struct engine *engine = *state;
-
- teardown_rrdhost();
- buffer_free(engine->instance_root->labels_buffer);
- buffer_free(engine->instance_root->buffer);
- teardown_configured_engine(state);
-
- return 0;
-}
-
-int setup_prometheus(void **state)
-{
- (void)state;
-
- prometheus_exporter_instance = calloc(1, sizeof(struct instance));
-
- setup_rrdhost();
-
- prometheus_exporter_instance->config.update_every = 10;
-
- prometheus_exporter_instance->config.options |=
- EXPORTING_OPTION_SEND_NAMES | EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
-
- prometheus_exporter_instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
- prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
-
- prometheus_exporter_instance->config.initialized = 1;
-
- return 0;
-}
-
-int teardown_prometheus(void **state)
-{
- (void)state;
-
- teardown_rrdhost();
-
- simple_pattern_free(prometheus_exporter_instance->config.charts_pattern);
- simple_pattern_free(prometheus_exporter_instance->config.hosts_pattern);
- free(prometheus_exporter_instance);
-
- return 0;
-}
diff --git a/exporting/tests/netdata_doubles.c b/exporting/tests/netdata_doubles.c
deleted file mode 100644
index 7e5017a5f..000000000
--- a/exporting/tests/netdata_doubles.c
+++ /dev/null
@@ -1,255 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "test_exporting_engine.h"
-
-// Use memory allocation functions guarded by CMocka in strdupz
-const char *__wrap_strdupz(const char *s)
-{
- char *duplicate = malloc(sizeof(char) * (strlen(s) + 1));
- strcpy(duplicate, s);
-
- return duplicate;
-}
-
-time_t __wrap_now_realtime_sec(void)
-{
- function_called();
- return mock_type(time_t);
-}
-
-void __wrap_uv_thread_set_name_np(uv_thread_t ut, const char* name)
-{
- (void)ut;
- (void)name;
-
- function_called();
-}
-
-void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
-{
- (void)file;
- (void)function;
- (void)line;
-
- function_called();
-
- va_list args;
-
- va_start(args, fmt);
- vsnprintf(log_line, MAX_LOG_LINE, fmt, args);
- va_end(args);
-}
-
-int __wrap_connect_to_one_of(
- const char *destination,
- int default_port,
- struct timeval *timeout,
- size_t *reconnects_counter,
- char *connected_to,
- size_t connected_to_size)
-{
- (void)timeout;
-
- function_called();
-
- check_expected(destination);
- check_expected_ptr(default_port);
- // TODO: check_expected_ptr(timeout);
- check_expected(reconnects_counter);
- check_expected(connected_to);
- check_expected(connected_to_size);
-
- return mock_type(int);
-}
-
-void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line)
-{
- (void)host;
- (void)file;
- (void)function;
- (void)line;
-}
-
-void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line)
-{
- (void)st;
- (void)file;
- (void)function;
- (void)line;
-}
-
-void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line)
-{
- (void)file;
- (void)function;
- (void)line;
-}
-
-RRDSET *rrdset_create_custom(
- RRDHOST *host,
- const char *type,
- const char *id,
- const char *name,
- const char *family,
- const char *context,
- const char *title,
- const char *units,
- const char *plugin,
- const char *module,
- long priority,
- int update_every,
- RRDSET_TYPE chart_type,
- RRD_MEMORY_MODE memory_mode,
- long history_entries)
-{
- check_expected_ptr(host);
- check_expected_ptr(type);
- check_expected_ptr(id);
- check_expected_ptr(name);
- check_expected_ptr(family);
- check_expected_ptr(context);
- UNUSED(title);
- check_expected_ptr(units);
- check_expected_ptr(plugin);
- check_expected_ptr(module);
- check_expected(priority);
- check_expected(update_every);
- check_expected(chart_type);
- UNUSED(memory_mode);
- UNUSED(history_entries);
-
- function_called();
-
- return mock_ptr_type(RRDSET *);
-}
-
-void rrdset_next_usec(RRDSET *st, usec_t microseconds)
-{
- check_expected_ptr(st);
- UNUSED(microseconds);
-
- function_called();
-}
-
-void rrdset_done(RRDSET *st)
-{
- check_expected_ptr(st);
-
- function_called();
-}
-
-RRDDIM *rrddim_add_custom(
- RRDSET *st,
- const char *id,
- const char *name,
- collected_number multiplier,
- collected_number divisor,
- RRD_ALGORITHM algorithm,
- RRD_MEMORY_MODE memory_mode)
-{
- check_expected_ptr(st);
- UNUSED(id);
- check_expected_ptr(name);
- check_expected(multiplier);
- check_expected(divisor);
- check_expected(algorithm);
- UNUSED(memory_mode);
-
- function_called();
-
- return NULL;
-}
-
-collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
-{
- check_expected_ptr(st);
- UNUSED(rd);
- UNUSED(value);
-
- function_called();
-
- return 0;
-}
-
-const char *rrd_memory_mode_name(RRD_MEMORY_MODE id)
-{
- (void)id;
- return RRD_MEMORY_MODE_NONE_NAME;
-}
-
-void rrdset_update_heterogeneous_flag(RRDSET *st)
-{
- (void)st;
-}
-
-time_t __mock_rrddim_query_oldest_time(STORAGE_METRIC_HANDLE *db_metric_handle)
-{
- (void)db_metric_handle;
-
- function_called();
- return mock_type(time_t);
-}
-
-time_t __mock_rrddim_query_latest_time(STORAGE_METRIC_HANDLE *db_metric_handle)
-{
- (void)db_metric_handle;
-
- function_called();
- return mock_type(time_t);
-}
-
-void __mock_rrddim_query_init(STORAGE_METRIC_HANDLE *db_metric_handle, struct rrddim_query_handle *handle, time_t start_time, time_t end_time)
-{
- (void)db_metric_handle;
- (void)handle;
-
- function_called();
- check_expected(start_time);
- check_expected(end_time);
-}
-
-int __mock_rrddim_query_is_finished(struct rrddim_query_handle *handle)
-{
- (void)handle;
-
- function_called();
- return mock_type(int);
-}
-
-STORAGE_POINT __mock_rrddim_query_next_metric(struct rrddim_query_handle *handle)
-{
- (void)handle;
-
- function_called();
-
- STORAGE_POINT sp = {};
- return sp;
-}
-
-void __mock_rrddim_query_finalize(struct rrddim_query_handle *handle)
-{
- (void)handle;
-
- function_called();
-}
-
-void rrdcalc_update_rrdlabels(RRDSET *st)
-{
- (void)st;
-}
-
-void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, const RRDVAR_ACQUIRED *rva)
-{
- (void)host;
- (void)rva;
-}
-
-void db_execute(const char *cmd)
-{
- (void)cmd;
-}
-
-DICTIONARY *rrdfamily_rrdvars_dict(const RRDFAMILY_ACQUIRED *rfa) {
- (void)rfa;
- return NULL;
-}
diff --git a/exporting/tests/system_doubles.c b/exporting/tests/system_doubles.c
deleted file mode 100644
index ca85800c0..000000000
--- a/exporting/tests/system_doubles.c
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "test_exporting_engine.h"
-
-void __wrap_uv_thread_create(uv_thread_t thread, void (*worker)(void *arg), void *arg)
-{
- function_called();
-
- check_expected_ptr(thread);
- check_expected_ptr(worker);
- check_expected_ptr(arg);
-}
-
-void __wrap_uv_mutex_lock(uv_mutex_t *mutex)
-{
- (void)mutex;
-}
-
-void __wrap_uv_mutex_unlock(uv_mutex_t *mutex)
-{
- (void)mutex;
-}
-
-void __wrap_uv_cond_signal(uv_cond_t *cond_var)
-{
- (void)cond_var;
-}
-
-void __wrap_uv_cond_wait(uv_cond_t *cond_var, uv_mutex_t *mutex)
-{
- (void)cond_var;
- (void)mutex;
-}
-
-ssize_t __wrap_recv(int sockfd, void *buf, size_t len, int flags)
-{
- function_called();
-
- check_expected(sockfd);
- check_expected_ptr(buf);
- check_expected(len);
- check_expected(flags);
-
- char *mock_string = "Test recv";
- strcpy(buf, mock_string);
-
- return strlen(mock_string);
-}
-
-ssize_t __wrap_send(int sockfd, const void *buf, size_t len, int flags)
-{
- function_called();
-
- check_expected(sockfd);
- check_expected_ptr(buf);
- check_expected_ptr(buf);
- check_expected(len);
- check_expected(flags);
-
- return strlen(buf);
-}
diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c
deleted file mode 100644
index b83f5de25..000000000
--- a/exporting/tests/test_exporting_engine.c
+++ /dev/null
@@ -1,2068 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "test_exporting_engine.h"
-#include "libnetdata/required_dummies.h"
-
-RRDHOST *localhost;
-netdata_rwlock_t rrd_rwlock;
-
-// global variables needed by read_exporting_config()
-struct config netdata_config;
-char *netdata_configured_user_config_dir = ".";
-char *netdata_configured_stock_config_dir = ".";
-char *netdata_configured_hostname = "test_global_host";
-bool global_statistics_enabled = true;
-
-char log_line[MAX_LOG_LINE + 1];
-
-void init_connectors_in_tests(struct engine *engine)
-{
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_uv_thread_create);
-
- expect_value(__wrap_uv_thread_create, thread, &engine->instance_root->thread);
- expect_value(__wrap_uv_thread_create, worker, simple_connector_worker);
- expect_value(__wrap_uv_thread_create, arg, engine->instance_root);
-
- expect_function_call(__wrap_uv_thread_set_name_np);
-
- assert_int_equal(__real_init_connectors(engine), 0);
-
- assert_int_equal(engine->now, 2);
- assert_int_equal(engine->instance_root->after, 2);
-}
-
-static void test_exporting_engine(void **state)
-{
- struct engine *engine = *state;
-
- expect_function_call(__wrap_read_exporting_config);
- will_return(__wrap_read_exporting_config, engine);
-
- expect_function_call(__wrap_init_connectors);
- expect_memory(__wrap_init_connectors, engine, engine, sizeof(struct engine));
- will_return(__wrap_init_connectors, 0);
-
- expect_function_call(__wrap_create_main_rusage_chart);
- expect_not_value(__wrap_create_main_rusage_chart, st_rusage, NULL);
- expect_not_value(__wrap_create_main_rusage_chart, rd_user, NULL);
- expect_not_value(__wrap_create_main_rusage_chart, rd_system, NULL);
-
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_mark_scheduled_instances);
- expect_memory(__wrap_mark_scheduled_instances, engine, engine, sizeof(struct engine));
- will_return(__wrap_mark_scheduled_instances, 1);
-
- expect_function_call(__wrap_prepare_buffers);
- expect_memory(__wrap_prepare_buffers, engine, engine, sizeof(struct engine));
- will_return(__wrap_prepare_buffers, 0);
-
- expect_function_call(__wrap_send_main_rusage);
- expect_value(__wrap_send_main_rusage, st_rusage, NULL);
- expect_value(__wrap_send_main_rusage, rd_user, NULL);
- expect_value(__wrap_send_main_rusage, rd_system, NULL);
-
- void *ptr = malloc(sizeof(struct netdata_static_thread));
- assert_ptr_equal(exporting_main(ptr), NULL);
- assert_int_equal(engine->now, 2);
- free(ptr);
-}
-
-static void test_read_exporting_config(void **state)
-{
- struct engine *engine = __mock_read_exporting_config(); // TODO: use real read_exporting_config() function
- *state = engine;
-
- assert_ptr_not_equal(engine, NULL);
- assert_string_equal(engine->config.hostname, "test_engine_host");
- assert_int_equal(engine->config.update_every, 3);
- assert_int_equal(engine->instance_num, 0);
-
-
- struct instance *instance = engine->instance_root;
- assert_ptr_not_equal(instance, NULL);
- assert_ptr_equal(instance->next, NULL);
- assert_ptr_equal(instance->engine, engine);
- assert_int_equal(instance->config.type, EXPORTING_CONNECTOR_TYPE_GRAPHITE);
- assert_string_equal(instance->config.destination, "localhost");
- assert_string_equal(instance->config.prefix, "netdata");
- assert_int_equal(instance->config.update_every, 1);
- assert_int_equal(instance->config.buffer_on_failures, 10);
- assert_int_equal(instance->config.timeoutms, 10000);
- assert_true(simple_pattern_matches(instance->config.charts_pattern, "any_chart"));
- assert_true(simple_pattern_matches(instance->config.hosts_pattern, "anyt_host"));
- assert_int_equal(instance->config.options, EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES);
-
- teardown_configured_engine(state);
-}
-
-static void test_init_connectors(void **state)
-{
- struct engine *engine = *state;
-
- init_connectors_in_tests(engine);
-
- assert_int_equal(engine->instance_num, 1);
-
- struct instance *instance = engine->instance_root;
-
- assert_ptr_equal(instance->next, NULL);
- assert_int_equal(instance->index, 0);
-
- struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config;
- assert_int_equal(connector_specific_config->default_port, 2003);
-
- assert_ptr_equal(instance->worker, simple_connector_worker);
- assert_ptr_equal(instance->start_batch_formatting, NULL);
- assert_ptr_equal(instance->start_host_formatting, format_host_labels_graphite_plaintext);
- assert_ptr_equal(instance->start_chart_formatting, NULL);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
- assert_ptr_equal(instance->end_chart_formatting, NULL);
- assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
-
- BUFFER *buffer = instance->buffer;
- assert_ptr_not_equal(buffer, NULL);
- buffer_sprintf(buffer, "%s", "graphite test");
- assert_string_equal(buffer_tostring(buffer), "graphite test");
-}
-
-static void test_init_graphite_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_graphite_instance(instance), 0);
- assert_int_equal(
- ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 2003);
- freez(instance->config.connector_specific_config);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
-
- instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_graphite_instance(instance), 0);
- assert_ptr_equal(instance->metric_formatting, format_dimension_stored_graphite_plaintext);
-}
-
-static void test_init_json_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_json_instance(instance), 0);
- assert_int_equal(
- ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 5448);
- freez(instance->config.connector_specific_config);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
-
- instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_json_instance(instance), 0);
- assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
-}
-
-static void test_init_opentsdb_telnet_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_opentsdb_telnet_instance(instance), 0);
- assert_int_equal(
- ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 4242);
- freez(instance->config.connector_specific_config);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_opentsdb_telnet);
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
-
- instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_opentsdb_telnet_instance(instance), 0);
- assert_ptr_equal(instance->metric_formatting, format_dimension_stored_opentsdb_telnet);
-}
-
-static void test_init_opentsdb_http_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_opentsdb_http_instance(instance), 0);
- assert_int_equal(
- ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 4242);
- freez(instance->config.connector_specific_config);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_opentsdb_http);
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
-
- instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
- assert_int_equal(init_opentsdb_http_instance(instance), 0);
- assert_ptr_equal(instance->metric_formatting, format_dimension_stored_opentsdb_http);
-}
-
-static void test_mark_scheduled_instances(void **state)
-{
- struct engine *engine = *state;
-
- assert_int_equal(__real_mark_scheduled_instances(engine), 1);
-
- struct instance *instance = engine->instance_root;
- assert_int_equal(instance->scheduled, 1);
- assert_int_equal(instance->before, 2);
-}
-
-static void test_rrdhost_is_exportable(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- expect_function_call(__wrap_info_int);
-
- assert_ptr_equal(localhost->exporting_flags, NULL);
-
- assert_int_equal(__real_rrdhost_is_exportable(instance, localhost), 1);
-
- assert_string_equal(log_line, "enabled exporting of host 'localhost' for instance 'instance_name'");
-
- assert_ptr_not_equal(localhost->exporting_flags, NULL);
- assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_EXPORTING_SEND);
-}
-
-static void test_false_rrdhost_is_exportable(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- simple_pattern_free(instance->config.hosts_pattern);
- instance->config.hosts_pattern = simple_pattern_create("!*", NULL, SIMPLE_PATTERN_EXACT);
-
- expect_function_call(__wrap_info_int);
-
- assert_ptr_equal(localhost->exporting_flags, NULL);
-
- assert_int_equal(__real_rrdhost_is_exportable(instance, localhost), 0);
-
- assert_string_equal(log_line, "disabled exporting of host 'localhost' for instance 'instance_name'");
-
- assert_ptr_not_equal(localhost->exporting_flags, NULL);
- assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_EXPORTING_DONT_SEND);
-}
-
-static void test_rrdset_is_exportable(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- assert_ptr_equal(st->exporting_flags, NULL);
-
- assert_int_equal(__real_rrdset_is_exportable(instance, st), 1);
-
- assert_ptr_not_equal(st->exporting_flags, NULL);
- assert_int_equal(st->exporting_flags[0], RRDSET_FLAG_EXPORTING_SEND);
-}
-
-static void test_false_rrdset_is_exportable(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- simple_pattern_free(instance->config.charts_pattern);
- instance->config.charts_pattern = simple_pattern_create("!*", NULL, SIMPLE_PATTERN_EXACT);
-
- assert_ptr_equal(st->exporting_flags, NULL);
-
- assert_int_equal(__real_rrdset_is_exportable(instance, st), 0);
-
- assert_ptr_not_equal(st->exporting_flags, NULL);
- assert_int_equal(st->exporting_flags[0], RRDSET_FLAG_EXPORTING_IGNORE);
-}
-
-static void test_exporting_calculate_value_from_stored_data(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
-
- time_t timestamp;
-
- instance->after = 3;
- instance->before = 10;
-
- expect_function_call(__mock_rrddim_query_oldest_time);
- will_return(__mock_rrddim_query_oldest_time, 1);
-
- expect_function_call(__mock_rrddim_query_latest_time);
- will_return(__mock_rrddim_query_latest_time, 2);
-
- expect_function_call(__mock_rrddim_query_init);
- expect_value(__mock_rrddim_query_init, start_time, 1);
- expect_value(__mock_rrddim_query_init, end_time, 2);
-
- expect_function_call(__mock_rrddim_query_is_finished);
- will_return(__mock_rrddim_query_is_finished, 0);
- expect_function_call(__mock_rrddim_query_next_metric);
-
- expect_function_call(__mock_rrddim_query_is_finished);
- will_return(__mock_rrddim_query_is_finished, 0);
- expect_function_call(__mock_rrddim_query_next_metric);
-
- expect_function_call(__mock_rrddim_query_is_finished);
- will_return(__mock_rrddim_query_is_finished, 1);
-
- expect_function_call(__mock_rrddim_query_finalize);
-
- assert_float_equal(__real_exporting_calculate_value_from_stored_data(instance, rd, &timestamp), 36, 0.1);
-}
-
-static void test_prepare_buffers(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->start_batch_formatting = __mock_start_batch_formatting;
- instance->start_host_formatting = __mock_start_host_formatting;
- instance->start_chart_formatting = __mock_start_chart_formatting;
- instance->metric_formatting = __mock_metric_formatting;
- instance->end_chart_formatting = __mock_end_chart_formatting;
- instance->end_host_formatting = __mock_end_host_formatting;
- instance->end_batch_formatting = __mock_end_batch_formatting;
- __real_mark_scheduled_instances(engine);
-
- expect_function_call(__mock_start_batch_formatting);
- expect_value(__mock_start_batch_formatting, instance, instance);
- will_return(__mock_start_batch_formatting, 0);
-
- expect_function_call(__wrap_rrdhost_is_exportable);
- expect_value(__wrap_rrdhost_is_exportable, instance, instance);
- expect_value(__wrap_rrdhost_is_exportable, host, localhost);
- will_return(__wrap_rrdhost_is_exportable, 1);
-
- expect_function_call(__mock_start_host_formatting);
- expect_value(__mock_start_host_formatting, instance, instance);
- expect_value(__mock_start_host_formatting, host, localhost);
- will_return(__mock_start_host_formatting, 0);
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- expect_function_call(__wrap_rrdset_is_exportable);
- expect_value(__wrap_rrdset_is_exportable, instance, instance);
- expect_value(__wrap_rrdset_is_exportable, st, st);
- will_return(__wrap_rrdset_is_exportable, 1);
-
- expect_function_call(__mock_start_chart_formatting);
- expect_value(__mock_start_chart_formatting, instance, instance);
- expect_value(__mock_start_chart_formatting, st, st);
- will_return(__mock_start_chart_formatting, 0);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- expect_function_call(__mock_metric_formatting);
- expect_value(__mock_metric_formatting, instance, instance);
- expect_value(__mock_metric_formatting, rd, rd);
- will_return(__mock_metric_formatting, 0);
-
- expect_function_call(__mock_end_chart_formatting);
- expect_value(__mock_end_chart_formatting, instance, instance);
- expect_value(__mock_end_chart_formatting, st, st);
- will_return(__mock_end_chart_formatting, 0);
-
- expect_function_call(__mock_end_host_formatting);
- expect_value(__mock_end_host_formatting, instance, instance);
- expect_value(__mock_end_host_formatting, host, localhost);
- will_return(__mock_end_host_formatting, 0);
-
- expect_function_call(__mock_end_batch_formatting);
- expect_value(__mock_end_batch_formatting, instance, instance);
- will_return(__mock_end_batch_formatting, 0);
-
- __real_prepare_buffers(engine);
-
- assert_int_equal(instance->stats.buffered_metrics, 1);
-
- // check with NULL functions
- instance->start_batch_formatting = NULL;
- instance->start_host_formatting = NULL;
- instance->start_chart_formatting = NULL;
- instance->metric_formatting = NULL;
- instance->end_chart_formatting = NULL;
- instance->end_host_formatting = NULL;
- instance->end_batch_formatting = NULL;
- __real_prepare_buffers(engine);
-
- assert_int_equal(instance->scheduled, 0);
- assert_int_equal(instance->after, 2);
-}
-
-static void test_exporting_name_copy(void **state)
-{
- (void)state;
-
- char *source_name = "test.name-with/special#characters_";
- char destination_name[RRD_ID_LENGTH_MAX + 1];
-
- assert_int_equal(exporting_name_copy(destination_name, source_name, RRD_ID_LENGTH_MAX), 34);
- assert_string_equal(destination_name, "test.name_with_special_characters_");
-}
-
-static void test_format_dimension_collected_graphite_plaintext(void **state)
-{
- struct engine *engine = *state;
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_collected_graphite_plaintext(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
-}
-
-static void test_format_dimension_stored_graphite_plaintext(void **state)
-{
- struct engine *engine = *state;
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_stored_graphite_plaintext(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 690565856.0000000 15052\n");
-}
-
-static void test_format_dimension_collected_json_plaintext(void **state)
-{
- struct engine *engine = *state;
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_collected_json_plaintext(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "{\"prefix\":\"netdata\",\"hostname\":\"test-host\",\"host_tags\":\"TAG1=VALUE1 TAG2=VALUE2\","
- "\"chart_id\":\"chart_id\",\"chart_name\":\"chart_name\",\"chart_family\":\"\","
- "\"chart_context\":\"\",\"chart_type\":\"\",\"units\":\"\",\"id\":\"dimension_id\","
- "\"name\":\"dimension_name\",\"value\":123000321,\"timestamp\":15051}\n");
-}
-
-static void test_format_dimension_stored_json_plaintext(void **state)
-{
- struct engine *engine = *state;
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_stored_json_plaintext(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "{\"prefix\":\"netdata\",\"hostname\":\"test-host\",\"host_tags\":\"TAG1=VALUE1 TAG2=VALUE2\","
- "\"chart_id\":\"chart_id\",\"chart_name\":\"chart_name\",\"chart_family\":\"\"," \
- "\"chart_context\": \"\",\"chart_type\":\"\",\"units\": \"\",\"id\":\"dimension_id\","
- "\"name\":\"dimension_name\",\"value\":690565856.0000000,\"timestamp\": 15052}\n");
-}
-
-static void test_format_dimension_collected_opentsdb_telnet(void **state)
-{
- struct engine *engine = *state;
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_collected_opentsdb_telnet(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "put netdata.chart_name.dimension_name 15051 123000321 host=test-host TAG1=VALUE1 TAG2=VALUE2\n");
-}
-
-static void test_format_dimension_stored_opentsdb_telnet(void **state)
-{
- struct engine *engine = *state;
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_stored_opentsdb_telnet(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "put netdata.chart_name.dimension_name 15052 690565856.0000000 host=test-host TAG1=VALUE1 TAG2=VALUE2\n");
-}
-
-static void test_format_dimension_collected_opentsdb_http(void **state)
-{
- struct engine *engine = *state;
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_collected_opentsdb_http(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "{\"metric\":\"netdata.chart_name.dimension_name\","
- "\"timestamp\":15051,"
- "\"value\":123000321,"
- "\"tags\":{\"host\":\"test-host TAG1=VALUE1 TAG2=VALUE2\"}}");
-}
-
-static void test_format_dimension_stored_opentsdb_http(void **state)
-{
- struct engine *engine = *state;
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
- assert_int_equal(format_dimension_stored_opentsdb_http(engine->instance_root, rd), 0);
- assert_string_equal(
- buffer_tostring(engine->instance_root->buffer),
- "{\"metric\":\"netdata.chart_name.dimension_name\","
- "\"timestamp\":15052,"
- "\"value\":690565856.0000000,"
- "\"tags\":{\"host\":\"test-host TAG1=VALUE1 TAG2=VALUE2\"}}");
-}
-
-static void test_exporting_discard_response(void **state)
-{
- struct engine *engine = *state;
-
- BUFFER *response = buffer_create(0, NULL);
- buffer_sprintf(response, "Test response");
-
- assert_int_equal(exporting_discard_response(response, engine->instance_root), 0);
- assert_int_equal(buffer_strlen(response), 0);
-
- buffer_free(response);
-}
-
-static void test_simple_connector_receive_response(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- struct stats *stats = &instance->stats;
-
- int sock = 1;
-
- expect_function_call(__wrap_recv);
- expect_value(__wrap_recv, sockfd, 1);
- expect_not_value(__wrap_recv, buf, 0);
- expect_value(__wrap_recv, len, 4096);
- expect_value(__wrap_recv, flags, MSG_DONTWAIT);
-
- simple_connector_receive_response(&sock, instance);
-
- assert_int_equal(stats->received_bytes, 9);
- assert_int_equal(stats->receptions, 1);
- assert_int_equal(sock, 1);
-}
-
-static void test_simple_connector_send_buffer(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- struct stats *stats = &instance->stats;
-
- int sock = 1;
- int failures = 3;
- size_t buffered_metrics = 1;
- BUFFER *header = buffer_create(0, NULL);
- BUFFER *buffer = buffer_create(0, NULL);
- buffer_strcat(header, "test header\n");
- buffer_strcat(buffer, "test buffer\n");
-
- expect_function_call(__wrap_send);
- expect_value(__wrap_send, sockfd, 1);
- expect_value(__wrap_send, buf, buffer_tostring(header));
- expect_string(__wrap_send, buf, "test header\n");
- expect_value(__wrap_send, len, 12);
- expect_value(__wrap_send, flags, MSG_NOSIGNAL);
-
- expect_function_call(__wrap_send);
- expect_value(__wrap_send, sockfd, 1);
- expect_value(__wrap_send, buf, buffer_tostring(buffer));
- expect_string(__wrap_send, buf, "test buffer\n");
- expect_value(__wrap_send, len, 12);
- expect_value(__wrap_send, flags, MSG_NOSIGNAL);
-
- simple_connector_send_buffer(&sock, &failures, instance, header, buffer, buffered_metrics);
-
- assert_int_equal(failures, 0);
- assert_int_equal(stats->transmission_successes, 1);
- assert_int_equal(stats->sent_bytes, 12);
- assert_int_equal(stats->sent_metrics, 1);
- assert_int_equal(stats->transmission_failures, 0);
-
- assert_int_equal(buffer_strlen(buffer), 0);
-
- assert_int_equal(sock, 1);
-}
-
-static void test_simple_connector_worker(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- struct stats *stats = &instance->stats;
-
- __real_mark_scheduled_instances(engine);
-
- struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
- instance->connector_specific_data = simple_connector_data;
- simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
- simple_connector_data->first_buffer = simple_connector_data->last_buffer;
- simple_connector_data->header = buffer_create(0, NULL);
- simple_connector_data->buffer = buffer_create(0, NULL);
- simple_connector_data->last_buffer->header = buffer_create(0, NULL);
- simple_connector_data->last_buffer->buffer = buffer_create(0, NULL);
- strcpy(simple_connector_data->connected_to, "localhost");
-
- buffer_sprintf(simple_connector_data->last_buffer->header, "test header");
- buffer_sprintf(simple_connector_data->last_buffer->buffer, "test buffer");
-
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_send_internal_metrics);
- expect_value(__wrap_send_internal_metrics, instance, instance);
- will_return(__wrap_send_internal_metrics, 0);
-
- simple_connector_worker(instance);
-
- assert_int_equal(stats->buffered_metrics, 0);
- assert_int_equal(stats->buffered_bytes, 0);
- assert_int_equal(stats->received_bytes, 0);
- assert_int_equal(stats->sent_bytes, 0);
- assert_int_equal(stats->sent_metrics, 0);
- assert_int_equal(stats->lost_metrics, 0);
- assert_int_equal(stats->receptions, 0);
- assert_int_equal(stats->transmission_successes, 0);
- assert_int_equal(stats->transmission_failures, 0);
- assert_int_equal(stats->data_lost_events, 0);
- assert_int_equal(stats->lost_bytes, 0);
- assert_int_equal(stats->reconnects, 0);
-}
-
-static void test_sanitize_json_string(void **state)
-{
- (void)state;
-
- char *src = "check \t\\\" string";
- char dst[19 + 1];
-
- sanitize_json_string(dst, src, 19);
-
- assert_string_equal(dst, "check _\\\\\\\" string");
-}
-
-static void test_sanitize_graphite_label_value(void **state)
-{
- (void)state;
-
- char *src = "check ;~ string";
- char dst[15 + 1];
-
- sanitize_graphite_label_value(dst, src, 15);
-
- assert_string_equal(dst, "check____string");
-}
-
-static void test_sanitize_opentsdb_label_value(void **state)
-{
- (void)state;
-
- char *src = "check \t\\\" #&$? -_./ string";
- char dst[26 + 1];
-
- sanitize_opentsdb_label_value(dst, src, 26);
-
- assert_string_equal(dst, "check__________-_./_string");
-}
-
-static void test_format_host_labels_json_plaintext(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
- instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
-
- assert_int_equal(format_host_labels_json_plaintext(instance, localhost), 0);
- assert_string_equal(buffer_tostring(instance->labels_buffer), "\"labels\":{\"key1\":\"value1\",\"key2\":\"value2\"},");
-}
-
-static void test_format_host_labels_graphite_plaintext(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
- instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
-
- assert_int_equal(format_host_labels_graphite_plaintext(instance, localhost), 0);
- assert_string_equal(buffer_tostring(instance->labels_buffer), ";key1=value1;key2=value2");
-}
-
-static void test_format_host_labels_opentsdb_telnet(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
- instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
-
- assert_int_equal(format_host_labels_opentsdb_telnet(instance, localhost), 0);
- assert_string_equal(buffer_tostring(instance->labels_buffer), " key1=value1 key2=value2");
-}
-
-static void test_format_host_labels_opentsdb_http(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
- instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
-
- assert_int_equal(format_host_labels_opentsdb_http(instance, localhost), 0);
- assert_string_equal(buffer_tostring(instance->labels_buffer), ",\"key1\":\"value1\",\"key2\":\"value2\"");
-}
-
-static void test_flush_host_labels(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->labels_buffer = buffer_create(12, NULL);
- buffer_strcat(instance->labels_buffer, "check string");
- assert_int_equal(buffer_strlen(instance->labels_buffer), 12);
-
- assert_int_equal(flush_host_labels(instance, localhost), 0);
- assert_int_equal(buffer_strlen(instance->labels_buffer), 0);
-}
-
-static void test_create_main_rusage_chart(void **state)
-{
- UNUSED(state);
-
- RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
- RRDDIM *rd_user = NULL;
- RRDDIM *rd_system = NULL;
-
- expect_function_call(rrdset_create_custom);
- expect_value(rrdset_create_custom, host, localhost);
- expect_string(rrdset_create_custom, type, "netdata");
- expect_string(rrdset_create_custom, id, "exporting_main_thread_cpu");
- expect_value(rrdset_create_custom, name, NULL);
- expect_string(rrdset_create_custom, family, "exporting");
- expect_string(rrdset_create_custom, context, "netdata.exporting_cpu_usage");
- expect_string(rrdset_create_custom, units, "milliseconds/s");
- expect_string(rrdset_create_custom, plugin, "exporting");
- expect_value(rrdset_create_custom, module, NULL);
- expect_value(rrdset_create_custom, priority, 130600);
- expect_value(rrdset_create_custom, update_every, localhost->rrd_update_every);
- expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
- will_return(rrdset_create_custom, st_rusage);
-
- expect_function_calls(rrddim_add_custom, 2);
- expect_value_count(rrddim_add_custom, st, st_rusage, 2);
- expect_value_count(rrddim_add_custom, name, NULL, 2);
- expect_value_count(rrddim_add_custom, multiplier, 1, 2);
- expect_value_count(rrddim_add_custom, divisor, 1000, 2);
- expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
-
- __real_create_main_rusage_chart(&st_rusage, &rd_user, &rd_system);
-
- free(st_rusage);
-}
-
-static void test_send_main_rusage(void **state)
-{
- UNUSED(state);
-
- RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
- st_rusage->counter_done = 1;
-
- expect_function_call(rrdset_next_usec);
- expect_value(rrdset_next_usec, st, st_rusage);
-
- expect_function_calls(rrddim_set_by_pointer, 2);
- expect_value_count(rrddim_set_by_pointer, st, st_rusage, 2);
-
- expect_function_call(rrdset_done);
- expect_value(rrdset_done, st, st_rusage);
-
- __real_send_main_rusage(st_rusage, NULL, NULL);
-
- free(st_rusage);
-}
-
-static void test_send_internal_metrics(void **state)
-{
- UNUSED(state);
-
- struct instance *instance = calloc(1, sizeof(struct instance));
- instance->config.name = (const char *)strdupz("test_instance");
- instance->config.update_every = 2;
-
- struct stats *stats = &instance->stats;
-
- stats->st_metrics = calloc(1, sizeof(RRDSET));
- stats->st_metrics->counter_done = 1;
- stats->st_bytes = calloc(1, sizeof(RRDSET));
- stats->st_bytes->counter_done = 1;
- stats->st_ops = calloc(1, sizeof(RRDSET));
- stats->st_ops->counter_done = 1;
- stats->st_rusage = calloc(1, sizeof(RRDSET));
- stats->st_rusage->counter_done = 1;
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_create_custom);
- expect_value(rrdset_create_custom, host, localhost);
- expect_string(rrdset_create_custom, type, "netdata");
- expect_string(rrdset_create_custom, id, "exporting_test_instance_metrics");
- expect_value(rrdset_create_custom, name, NULL);
- expect_string(rrdset_create_custom, family, "exporting");
- expect_string(rrdset_create_custom, context, "exporting_buffer");
- expect_string(rrdset_create_custom, units, "metrics");
- expect_string(rrdset_create_custom, plugin, "exporting");
- expect_value(rrdset_create_custom, module, NULL);
- expect_value(rrdset_create_custom, priority, 130610);
- expect_value(rrdset_create_custom, update_every, 2);
- expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
- will_return(rrdset_create_custom, stats->st_metrics);
-
- expect_function_calls(rrddim_add_custom, 3);
- expect_value_count(rrddim_add_custom, st, stats->st_metrics, 3);
- expect_value_count(rrddim_add_custom, name, NULL, 3);
- expect_value_count(rrddim_add_custom, multiplier, 1, 3);
- expect_value_count(rrddim_add_custom, divisor, 1, 3);
- expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 3);
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_create_custom);
- expect_value(rrdset_create_custom, host, localhost);
- expect_string(rrdset_create_custom, type, "netdata");
- expect_string(rrdset_create_custom, id, "exporting_test_instance_bytes");
- expect_value(rrdset_create_custom, name, NULL);
- expect_string(rrdset_create_custom, family, "exporting");
- expect_string(rrdset_create_custom, context, "exporting_data_size");
- expect_string(rrdset_create_custom, units, "KiB");
- expect_string(rrdset_create_custom, plugin, "exporting");
- expect_value(rrdset_create_custom, module, NULL);
- expect_value(rrdset_create_custom, priority, 130620);
- expect_value(rrdset_create_custom, update_every, 2);
- expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_AREA);
- will_return(rrdset_create_custom, stats->st_bytes);
-
- expect_function_calls(rrddim_add_custom, 4);
- expect_value_count(rrddim_add_custom, st, stats->st_bytes, 4);
- expect_value_count(rrddim_add_custom, name, NULL, 4);
- expect_value_count(rrddim_add_custom, multiplier, 1, 4);
- expect_value_count(rrddim_add_custom, divisor, 1024, 4);
- expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 4);
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_create_custom);
- expect_value(rrdset_create_custom, host, localhost);
- expect_string(rrdset_create_custom, type, "netdata");
- expect_string(rrdset_create_custom, id, "exporting_test_instance_ops");
- expect_value(rrdset_create_custom, name, NULL);
- expect_string(rrdset_create_custom, family, "exporting");
- expect_string(rrdset_create_custom, context, "exporting_operations");
- expect_string(rrdset_create_custom, units, "operations");
- expect_string(rrdset_create_custom, plugin, "exporting");
- expect_value(rrdset_create_custom, module, NULL);
- expect_value(rrdset_create_custom, priority, 130630);
- expect_value(rrdset_create_custom, update_every, 2);
- expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
- will_return(rrdset_create_custom, stats->st_ops);
-
- expect_function_calls(rrddim_add_custom, 5);
- expect_value_count(rrddim_add_custom, st, stats->st_ops, 5);
- expect_value_count(rrddim_add_custom, name, NULL, 5);
- expect_value_count(rrddim_add_custom, multiplier, 1, 5);
- expect_value_count(rrddim_add_custom, divisor, 1, 5);
- expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 5);
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_create_custom);
- expect_value(rrdset_create_custom, host, localhost);
- expect_string(rrdset_create_custom, type, "netdata");
- expect_string(rrdset_create_custom, id, "exporting_test_instance_thread_cpu");
- expect_value(rrdset_create_custom, name, NULL);
- expect_string(rrdset_create_custom, family, "exporting");
- expect_string(rrdset_create_custom, context, "exporting_instance");
- expect_string(rrdset_create_custom, units, "milliseconds/s");
- expect_string(rrdset_create_custom, plugin, "exporting");
- expect_value(rrdset_create_custom, module, NULL);
- expect_value(rrdset_create_custom, priority, 130640);
- expect_value(rrdset_create_custom, update_every, 2);
- expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
- will_return(rrdset_create_custom, stats->st_rusage);
-
- expect_function_calls(rrddim_add_custom, 2);
- expect_value_count(rrddim_add_custom, st, stats->st_rusage, 2);
- expect_value_count(rrddim_add_custom, name, NULL, 2);
- expect_value_count(rrddim_add_custom, multiplier, 1, 2);
- expect_value_count(rrddim_add_custom, divisor, 1000, 2);
- expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_next_usec);
- expect_value(rrdset_next_usec, st, stats->st_metrics);
-
- expect_function_calls(rrddim_set_by_pointer, 3);
- expect_value_count(rrddim_set_by_pointer, st, stats->st_metrics, 3);
-
- expect_function_call(rrdset_done);
- expect_value(rrdset_done, st, stats->st_metrics);
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_next_usec);
- expect_value(rrdset_next_usec, st, stats->st_bytes);
-
- expect_function_calls(rrddim_set_by_pointer, 4);
- expect_value_count(rrddim_set_by_pointer, st, stats->st_bytes, 4);
-
- expect_function_call(rrdset_done);
- expect_value(rrdset_done, st, stats->st_bytes);
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_next_usec);
- expect_value(rrdset_next_usec, st, stats->st_ops);
-
- expect_function_calls(rrddim_set_by_pointer, 5);
- expect_value_count(rrddim_set_by_pointer, st, stats->st_ops, 5);
-
- expect_function_call(rrdset_done);
- expect_value(rrdset_done, st, stats->st_ops);
-
- // ------------------------------------------------------------------------
-
- expect_function_call(rrdset_next_usec);
- expect_value(rrdset_next_usec, st, stats->st_rusage);
-
- expect_function_calls(rrddim_set_by_pointer, 2);
- expect_value_count(rrddim_set_by_pointer, st, stats->st_rusage, 2);
-
- expect_function_call(rrdset_done);
- expect_value(rrdset_done, st, stats->st_rusage);
-
- // ------------------------------------------------------------------------
-
- __real_send_internal_metrics(instance);
-
- free(stats->st_metrics);
- free(stats->st_bytes);
- free(stats->st_ops);
- free(stats->st_rusage);
- free((void *)instance->config.name);
- free(instance);
-}
-
-static void test_can_send_rrdset(void **state)
-{
- (void)*state;
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- assert_int_equal(can_send_rrdset(prometheus_exporter_instance, st, NULL), 1);
-
- rrdset_flag_set(st, RRDSET_FLAG_EXPORTING_IGNORE);
- assert_int_equal(can_send_rrdset(prometheus_exporter_instance, st, NULL), 0);
- rrdset_flag_clear(st, RRDSET_FLAG_EXPORTING_IGNORE);
-
- // TODO: test with a denying simple pattern
-
- rrdset_flag_set(st, RRDSET_FLAG_OBSOLETE);
- assert_int_equal(can_send_rrdset(prometheus_exporter_instance, st, NULL), 0);
- rrdset_flag_clear(st, RRDSET_FLAG_OBSOLETE);
-
- st->rrd_memory_mode = RRD_MEMORY_MODE_NONE;
- prometheus_exporter_instance->config.options |= EXPORTING_SOURCE_DATA_AVERAGE;
- assert_int_equal(can_send_rrdset(prometheus_exporter_instance, st, NULL), 0);
-}
-
-static void test_prometheus_name_copy(void **state)
-{
- (void)*state;
-
- char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
- assert_int_equal(prometheus_name_copy(destination_name, "test-name", PROMETHEUS_ELEMENT_MAX), 9);
-
- assert_string_equal(destination_name, "test_name");
-}
-
-static void test_prometheus_label_copy(void **state)
-{
- (void)*state;
-
- char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
- assert_int_equal(prometheus_label_copy(destination_name, "test\"\\\nlabel", PROMETHEUS_ELEMENT_MAX), 15);
-
- assert_string_equal(destination_name, "test\\\"\\\\\\\nlabel");
-}
-
-static void test_prometheus_units_copy(void **state)
-{
- (void)*state;
-
- char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
- assert_string_equal(prometheus_units_copy(destination_name, "test-units", PROMETHEUS_ELEMENT_MAX, 0), "_test_units");
- assert_string_equal(destination_name, "_test_units");
-
- assert_string_equal(prometheus_units_copy(destination_name, "%", PROMETHEUS_ELEMENT_MAX, 0), "_percent");
- assert_string_equal(prometheus_units_copy(destination_name, "test-units/s", PROMETHEUS_ELEMENT_MAX, 0), "_test_units_persec");
-
- assert_string_equal(prometheus_units_copy(destination_name, "KiB", PROMETHEUS_ELEMENT_MAX, 1), "_KB");
-}
-
-static void test_format_host_labels_prometheus(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
- instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
-
- format_host_labels_prometheus(instance, localhost);
- assert_string_equal(buffer_tostring(instance->labels_buffer), "key1=\"value1\",key2=\"value2\"");
-}
-
-static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
-{
- (void)state;
-
- BUFFER *buffer = buffer_create(0, NULL);
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- localhost->hostname = string_strdupz("test_hostname");
- st->family = string_strdupz("test_family");
- st->context = string_strdupz("test_context");
-
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(localhost, NULL, buffer, "test_server", "test_prefix", 0, 0);
-
- assert_string_equal(
- buffer_tostring(buffer),
- "netdata_info{instance=\"test_hostname\",application=\"\",version=\"\",key1=\"value1\",key2=\"value2\"} 1\n"
- "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\"} 690565856.0000000\n");
-
- buffer_flush(buffer);
-
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
- localhost, NULL, buffer, "test_server", "test_prefix", 0, PROMETHEUS_OUTPUT_NAMES | PROMETHEUS_OUTPUT_TYPES);
-
- assert_string_equal(
- buffer_tostring(buffer),
- "netdata_info{instance=\"test_hostname\",application=\"\",version=\"\",key1=\"value1\",key2=\"value2\"} 1\n"
- "# TYPE test_prefix_test_context gauge\n"
- "test_prefix_test_context{chart=\"chart_name\",family=\"test_family\",dimension=\"dimension_name\"} 690565856.0000000\n");
-
- buffer_flush(buffer);
-
- expect_function_call(__wrap_now_realtime_sec);
- will_return(__wrap_now_realtime_sec, 2);
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(localhost, NULL, buffer, "test_server", "test_prefix", 0, 0);
-
- assert_string_equal(
- buffer_tostring(buffer),
- "netdata_info{instance=\"test_hostname\",application=\"\",version=\"\",key1=\"value1\",key2=\"value2\"} 1\n"
- "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\",instance=\"test_hostname\"} 690565856.0000000\n");
-
- free(st->context);
- free(st->family);
- free(localhost->hostname);
- buffer_free(buffer);
-}
-
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
-static void test_init_prometheus_remote_write_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- expect_function_call(__wrap_init_write_request);
- will_return(__wrap_init_write_request, 0xff);
-
- assert_int_equal(init_prometheus_remote_write_instance(instance), 0);
-
- assert_ptr_equal(instance->worker, simple_connector_worker);
- assert_ptr_equal(instance->start_batch_formatting, NULL);
- assert_ptr_equal(instance->start_host_formatting, format_host_prometheus_remote_write);
- assert_ptr_equal(instance->start_chart_formatting, format_chart_prometheus_remote_write);
- assert_ptr_equal(instance->metric_formatting, format_dimension_prometheus_remote_write);
- assert_ptr_equal(instance->end_chart_formatting, NULL);
- assert_ptr_equal(instance->end_host_formatting, NULL);
- assert_ptr_equal(instance->end_batch_formatting, format_batch_prometheus_remote_write);
- assert_ptr_equal(instance->prepare_header, prometheus_remote_write_prepare_header);
- assert_ptr_equal(instance->check_response, process_prometheus_remote_write_response);
-
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
-
- struct prometheus_remote_write_specific_data *connector_specific_data =
- (struct prometheus_remote_write_specific_data *)instance->connector_specific_data;
-
- assert_ptr_not_equal(instance->connector_specific_data, NULL);
- assert_ptr_not_equal(connector_specific_data->write_request, NULL);
- freez(instance->connector_specific_data);
-}
-
-static void test_prometheus_remote_write_prepare_header(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- struct prometheus_remote_write_specific_config *connector_specific_config =
- callocz(1, sizeof(struct prometheus_remote_write_specific_config));
- instance->config.connector_specific_config = connector_specific_config;
- connector_specific_config->remote_write_path = strdupz("/receive");
-
- struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
- instance->connector_specific_data = simple_connector_data;
- simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
- simple_connector_data->last_buffer->header = buffer_create(0, NULL);
- simple_connector_data->last_buffer->buffer = buffer_create(0, NULL);
- strcpy(simple_connector_data->connected_to, "localhost");
-
- buffer_sprintf(simple_connector_data->last_buffer->buffer, "test buffer");
-
- prometheus_remote_write_prepare_header(instance);
-
- assert_string_equal(
- buffer_tostring(simple_connector_data->last_buffer->header),
- "POST /receive HTTP/1.1\r\n"
- "Host: localhost\r\n"
- "Accept: */*\r\n"
- "Content-Encoding: snappy\r\n"
- "Content-Type: application/x-protobuf\r\n"
- "X-Prometheus-Remote-Write-Version: 0.1.0\r\n"
- "Content-Length: 11\r\n"
- "\r\n");
-
- free(connector_specific_config->remote_write_path);
-
- buffer_free(simple_connector_data->last_buffer->header);
- buffer_free(simple_connector_data->last_buffer->buffer);
-}
-
-static void test_process_prometheus_remote_write_response(void **state)
-{
- (void)state;
- BUFFER *buffer = buffer_create(0, NULL);
-
- buffer_sprintf(buffer, "HTTP/1.1 200 OK\r\n");
- assert_int_equal(process_prometheus_remote_write_response(buffer, NULL), 0);
-
- buffer_free(buffer);
-}
-
-static void test_format_host_prometheus_remote_write(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
- instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
-
- struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data *));
- instance->connector_specific_data = simple_connector_data;
- struct prometheus_remote_write_specific_data *connector_specific_data =
- mallocz(sizeof(struct prometheus_remote_write_specific_data *));
- simple_connector_data->connector_specific_data = (void *)connector_specific_data;
- connector_specific_data->write_request = (void *)0xff;
-
- localhost->program_name = string_strdupz("test_program");
- localhost->program_version = string_strdupz("test_version");
-
- expect_function_call(__wrap_add_host_info);
- expect_value(__wrap_add_host_info, write_request_p, 0xff);
- expect_string(__wrap_add_host_info, name, "netdata_info");
- expect_string(__wrap_add_host_info, instance, "test-host");
- expect_string(__wrap_add_host_info, application, "test_program");
- expect_string(__wrap_add_host_info, version, "test_version");
- expect_in_range(
- __wrap_add_host_info, timestamp, now_realtime_usec() / USEC_PER_MS - 1000, now_realtime_usec() / USEC_PER_MS);
-
- expect_function_call(__wrap_add_label);
- expect_value(__wrap_add_label, write_request_p, 0xff);
- expect_string(__wrap_add_label, key, "key1");
- expect_string(__wrap_add_label, value, "value1");
-
- expect_function_call(__wrap_add_label);
- expect_value(__wrap_add_label, write_request_p, 0xff);
- expect_string(__wrap_add_label, key, "key2");
- expect_string(__wrap_add_label, value, "value2");
-
- assert_int_equal(format_host_prometheus_remote_write(instance, localhost), 0);
-
- freez(connector_specific_data);
- freez(simple_connector_data);
- free(localhost->program_name);
- free(localhost->program_version);
-}
-
-static void test_format_dimension_prometheus_remote_write(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data *));
- instance->connector_specific_data = simple_connector_data;
- struct prometheus_remote_write_specific_data *connector_specific_data =
- mallocz(sizeof(struct prometheus_remote_write_specific_data *));
- simple_connector_data->connector_specific_data = (void *)connector_specific_data;
- connector_specific_data->write_request = (void *)0xff;
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- RRDDIM *rd;
- rrddim_foreach_read(rd, st);
- break;
- rrddim_foreach_done(rd);
-
- expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
-
- expect_function_call(__wrap_add_metric);
- expect_value(__wrap_add_metric, write_request_p, 0xff);
- expect_string(__wrap_add_metric, name, "netdata_");
- expect_string(__wrap_add_metric, chart, "");
- expect_string(__wrap_add_metric, family, "");
- expect_string(__wrap_add_metric, dimension, "dimension_name");
- expect_string(__wrap_add_metric, instance, "test-host");
- expect_value(__wrap_add_metric, value, 0x292932e0);
- expect_value(__wrap_add_metric, timestamp, 15052 * MSEC_PER_SEC);
-
- assert_int_equal(format_dimension_prometheus_remote_write(instance, rd), 0);
-}
-
-static void test_format_batch_prometheus_remote_write(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data *));
- instance->connector_specific_data = simple_connector_data;
- struct prometheus_remote_write_specific_data *connector_specific_data =
- mallocz(sizeof(struct prometheus_remote_write_specific_data *));
- simple_connector_data->connector_specific_data = (void *)connector_specific_data;
- connector_specific_data->write_request = __real_init_write_request();
-
- expect_function_call(__wrap_simple_connector_end_batch);
- expect_value(__wrap_simple_connector_end_batch, instance, instance);
- will_return(__wrap_simple_connector_end_batch, 0);
- __real_add_host_info(
- connector_specific_data->write_request,
- "test_name", "test_instance", "test_application", "test_version", 15051);
-
- __real_add_label(connector_specific_data->write_request, "test_key", "test_value");
-
- __real_add_metric(
- connector_specific_data->write_request,
- "test_name", "test chart", "test_family", "test_dimension", "test_instance",
- 123000321, 15052);
-
- assert_int_equal(format_batch_prometheus_remote_write(instance), 0);
-
- BUFFER *buffer = instance->buffer;
- char *write_request_string = calloc(1, 1000);
- convert_write_request_to_string(buffer_tostring(buffer), buffer_strlen(buffer), write_request_string, 999);
- assert_int_equal(strlen(write_request_string), 753);
- assert_string_equal(
- write_request_string,
- "timeseries {\n"
- " labels {\n"
- " name: \"__name__\"\n"
- " value: \"test_name\"\n"
- " }\n"
- " labels {\n"
- " name: \"instance\"\n"
- " value: \"test_instance\"\n"
- " }\n"
- " labels {\n"
- " name: \"application\"\n"
- " value: \"test_application\"\n"
- " }\n"
- " labels {\n"
- " name: \"version\"\n"
- " value: \"test_version\"\n"
- " }\n"
- " labels {\n"
- " name: \"test_key\"\n"
- " value: \"test_value\"\n"
- " }\n"
- " samples {\n"
- " value: 1\n"
- " timestamp: 15051\n"
- " }\n"
- "}\n"
- "timeseries {\n"
- " labels {\n"
- " name: \"__name__\"\n"
- " value: \"test_name\"\n"
- " }\n"
- " labels {\n"
- " name: \"chart\"\n"
- " value: \"test chart\"\n"
- " }\n"
- " labels {\n"
- " name: \"family\"\n"
- " value: \"test_family\"\n"
- " }\n"
- " labels {\n"
- " name: \"dimension\"\n"
- " value: \"test_dimension\"\n"
- " }\n"
- " labels {\n"
- " name: \"instance\"\n"
- " value: \"test_instance\"\n"
- " }\n"
- " samples {\n"
- " value: 123000321\n"
- " timestamp: 15052\n"
- " }\n"
- "}\n");
- free(write_request_string);
-
- protocol_buffers_shutdown();
-}
-#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
-
-#if HAVE_KINESIS
-static void test_init_aws_kinesis_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
-
- struct aws_kinesis_specific_config *connector_specific_config =
- callocz(1, sizeof(struct aws_kinesis_specific_config));
- instance->config.connector_specific_config = connector_specific_config;
- connector_specific_config->stream_name = strdupz("test_stream");
- connector_specific_config->auth_key_id = strdupz("test_auth_key_id");
- connector_specific_config->secure_key = strdupz("test_secure_key");
-
- expect_function_call(__wrap_aws_sdk_init);
- expect_function_call(__wrap_kinesis_init);
- expect_not_value(__wrap_kinesis_init, kinesis_specific_data_p, NULL);
- expect_string(__wrap_kinesis_init, region, "localhost");
- expect_string(__wrap_kinesis_init, access_key_id, "test_auth_key_id");
- expect_string(__wrap_kinesis_init, secret_key, "test_secure_key");
- expect_value(__wrap_kinesis_init, timeout, 10000);
-
- assert_int_equal(init_aws_kinesis_instance(instance), 0);
-
- assert_ptr_equal(instance->worker, aws_kinesis_connector_worker);
- assert_ptr_equal(instance->start_batch_formatting, NULL);
- assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
- assert_ptr_equal(instance->start_chart_formatting, NULL);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
- assert_ptr_equal(instance->end_chart_formatting, NULL);
- assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
- assert_ptr_equal(instance->end_batch_formatting, NULL);
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
- assert_ptr_not_equal(instance->connector_specific_data, NULL);
- freez(instance->connector_specific_data);
-
- instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
-
- expect_function_call(__wrap_kinesis_init);
- expect_not_value(__wrap_kinesis_init, kinesis_specific_data_p, NULL);
- expect_string(__wrap_kinesis_init, region, "localhost");
- expect_string(__wrap_kinesis_init, access_key_id, "test_auth_key_id");
- expect_string(__wrap_kinesis_init, secret_key, "test_secure_key");
- expect_value(__wrap_kinesis_init, timeout, 10000);
-
- assert_int_equal(init_aws_kinesis_instance(instance), 0);
- assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
-
- free(connector_specific_config->stream_name);
- free(connector_specific_config->auth_key_id);
- free(connector_specific_config->secure_key);
-}
-
-static void test_aws_kinesis_connector_worker(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- struct stats *stats = &instance->stats;
- BUFFER *buffer = instance->buffer;
-
- __real_mark_scheduled_instances(engine);
-
- expect_function_call(__wrap_rrdhost_is_exportable);
- expect_value(__wrap_rrdhost_is_exportable, instance, instance);
- expect_value(__wrap_rrdhost_is_exportable, host, localhost);
- will_return(__wrap_rrdhost_is_exportable, 1);
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- expect_function_call(__wrap_rrdset_is_exportable);
- expect_value(__wrap_rrdset_is_exportable, instance, instance);
- expect_value(__wrap_rrdset_is_exportable, st, st);
- will_return(__wrap_rrdset_is_exportable, 1);
-
- expect_function_call(__wrap_simple_connector_end_batch);
- expect_value(__wrap_simple_connector_end_batch, instance, instance);
- will_return(__wrap_simple_connector_end_batch, 0);
- __real_prepare_buffers(engine);
-
- struct aws_kinesis_specific_config *connector_specific_config =
- callocz(1, sizeof(struct aws_kinesis_specific_config));
- instance->config.connector_specific_config = connector_specific_config;
- connector_specific_config->stream_name = strdupz("test_stream");
- connector_specific_config->auth_key_id = strdupz("test_auth_key_id");
- connector_specific_config->secure_key = strdupz("test_secure_key");
-
- struct aws_kinesis_specific_data *connector_specific_data = callocz(1, sizeof(struct aws_kinesis_specific_data));
- instance->connector_specific_data = (void *)connector_specific_data;
-
- expect_function_call(__wrap_kinesis_put_record);
- expect_not_value(__wrap_kinesis_put_record, kinesis_specific_data_p, NULL);
- expect_string(__wrap_kinesis_put_record, stream_name, "test_stream");
- expect_string(__wrap_kinesis_put_record, partition_key, "netdata_0");
- expect_value(__wrap_kinesis_put_record, data, buffer_tostring(buffer));
- // The buffer is prepared by Graphite exporting connector
- expect_string(
- __wrap_kinesis_put_record, data,
- "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
- expect_value(__wrap_kinesis_put_record, data_len, 84);
-
- expect_function_call(__wrap_kinesis_get_result);
- expect_value(__wrap_kinesis_get_result, request_outcomes_p, NULL);
- expect_not_value(__wrap_kinesis_get_result, error_message, NULL);
- expect_not_value(__wrap_kinesis_get_result, sent_bytes, NULL);
- expect_not_value(__wrap_kinesis_get_result, lost_bytes, NULL);
- will_return(__wrap_kinesis_get_result, 0);
-
- expect_function_call(__wrap_send_internal_metrics);
- expect_value(__wrap_send_internal_metrics, instance, instance);
- will_return(__wrap_send_internal_metrics, 0);
-
- aws_kinesis_connector_worker(instance);
-
- assert_int_equal(stats->buffered_metrics, 0);
- assert_int_equal(stats->buffered_bytes, 84);
- assert_int_equal(stats->received_bytes, 0);
- assert_int_equal(stats->sent_bytes, 84);
- assert_int_equal(stats->sent_metrics, 1);
- assert_int_equal(stats->lost_metrics, 0);
- assert_int_equal(stats->receptions, 1);
- assert_int_equal(stats->transmission_successes, 1);
- assert_int_equal(stats->transmission_failures, 0);
- assert_int_equal(stats->data_lost_events, 0);
- assert_int_equal(stats->lost_bytes, 0);
- assert_int_equal(stats->reconnects, 0);
-
- free(connector_specific_config->stream_name);
- free(connector_specific_config->auth_key_id);
- free(connector_specific_config->secure_key);
-}
-#endif // HAVE_KINESIS
-
-#if ENABLE_EXPORTING_PUBSUB
-static void test_init_pubsub_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
-
- struct pubsub_specific_config *connector_specific_config =
- callocz(1, sizeof(struct pubsub_specific_config));
- instance->config.connector_specific_config = connector_specific_config;
- connector_specific_config->credentials_file = strdupz("/test/credentials/file");
- connector_specific_config->project_id = strdupz("test_project_id");
- connector_specific_config->topic_id = strdupz("test_topic_id");
-
- expect_function_call(__wrap_pubsub_init);
- expect_not_value(__wrap_pubsub_init, pubsub_specific_data_p, NULL);
- expect_string(__wrap_pubsub_init, destination, "localhost");
- expect_string(__wrap_pubsub_init, error_message, "");
- expect_string(__wrap_pubsub_init, credentials_file, "/test/credentials/file");
- expect_string(__wrap_pubsub_init, project_id, "test_project_id");
- expect_string(__wrap_pubsub_init, topic_id, "test_topic_id");
- will_return(__wrap_pubsub_init, 0);
-
- assert_int_equal(init_pubsub_instance(instance), 0);
-
- assert_ptr_equal(instance->worker, pubsub_connector_worker);
- assert_ptr_equal(instance->start_batch_formatting, NULL);
- assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
- assert_ptr_equal(instance->start_chart_formatting, NULL);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
- assert_ptr_equal(instance->end_chart_formatting, NULL);
- assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
- assert_ptr_equal(instance->end_batch_formatting, NULL);
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
- assert_ptr_not_equal(instance->connector_specific_data, NULL);
- freez(instance->connector_specific_data);
-
- instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
-
- expect_function_call(__wrap_pubsub_init);
- expect_not_value(__wrap_pubsub_init, pubsub_specific_data_p, NULL);
- expect_string(__wrap_pubsub_init, destination, "localhost");
- expect_string(__wrap_pubsub_init, error_message, "");
- expect_string(__wrap_pubsub_init, credentials_file, "/test/credentials/file");
- expect_string(__wrap_pubsub_init, project_id, "test_project_id");
- expect_string(__wrap_pubsub_init, topic_id, "test_topic_id");
- will_return(__wrap_pubsub_init, 0);
-
- assert_int_equal(init_pubsub_instance(instance), 0);
- assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
-
- free(connector_specific_config->credentials_file);
- free(connector_specific_config->project_id);
- free(connector_specific_config->topic_id);
-}
-
-static void test_pubsub_connector_worker(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- struct stats *stats = &instance->stats;
-
- __real_mark_scheduled_instances(engine);
-
- expect_function_call(__wrap_rrdhost_is_exportable);
- expect_value(__wrap_rrdhost_is_exportable, instance, instance);
- expect_value(__wrap_rrdhost_is_exportable, host, localhost);
- will_return(__wrap_rrdhost_is_exportable, 1);
-
- RRDSET *st;
- rrdset_foreach_read(st, localhost);
- break;
- rrdset_foreach_done(st);
-
- expect_function_call(__wrap_rrdset_is_exportable);
- expect_value(__wrap_rrdset_is_exportable, instance, instance);
- expect_value(__wrap_rrdset_is_exportable, st, st);
- will_return(__wrap_rrdset_is_exportable, 1);
-
- expect_function_call(__wrap_simple_connector_end_batch);
- expect_value(__wrap_simple_connector_end_batch, instance, instance);
- will_return(__wrap_simple_connector_end_batch, 0);
- __real_prepare_buffers(engine);
-
- struct pubsub_specific_config *connector_specific_config =
- callocz(1, sizeof(struct pubsub_specific_config));
- instance->config.connector_specific_config = connector_specific_config;
- connector_specific_config->credentials_file = strdupz("/test/credentials/file");
- connector_specific_config->project_id = strdupz("test_project_id");
- connector_specific_config->topic_id = strdupz("test_topic_id");
-
- struct pubsub_specific_data *connector_specific_data = callocz(1, sizeof(struct pubsub_specific_data));
- instance->connector_specific_data = (void *)connector_specific_data;
-
- expect_function_call(__wrap_pubsub_add_message);
- expect_not_value(__wrap_pubsub_add_message, pubsub_specific_data_p, NULL);
- // The buffer is prepared by Graphite exporting connector
- expect_string(
- __wrap_pubsub_add_message, data,
- "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
- will_return(__wrap_pubsub_add_message, 0);
-
- expect_function_call(__wrap_pubsub_publish);
- expect_not_value(__wrap_pubsub_publish, pubsub_specific_data_p, NULL);
- expect_string(__wrap_pubsub_publish, error_message, "");
- expect_value(__wrap_pubsub_publish, buffered_metrics, 1);
- expect_value(__wrap_pubsub_publish, buffered_bytes, 84);
- will_return(__wrap_pubsub_publish, 0);
-
- expect_function_call(__wrap_pubsub_get_result);
- expect_not_value(__wrap_pubsub_get_result, pubsub_specific_data_p, NULL);
- expect_not_value(__wrap_pubsub_get_result, error_message, NULL);
- expect_not_value(__wrap_pubsub_get_result, sent_metrics, NULL);
- expect_not_value(__wrap_pubsub_get_result, sent_bytes, NULL);
- expect_not_value(__wrap_pubsub_get_result, lost_metrics, NULL);
- expect_not_value(__wrap_pubsub_get_result, lost_bytes, NULL);
- will_return(__wrap_pubsub_get_result, 0);
-
- expect_function_call(__wrap_send_internal_metrics);
- expect_value(__wrap_send_internal_metrics, instance, instance);
- will_return(__wrap_send_internal_metrics, 0);
-
- pubsub_connector_worker(instance);
-
- assert_int_equal(stats->buffered_metrics, 0);
- assert_int_equal(stats->buffered_bytes, 84);
- assert_int_equal(stats->received_bytes, 0);
- assert_int_equal(stats->sent_bytes, 84);
- assert_int_equal(stats->sent_metrics, 0);
- assert_int_equal(stats->lost_metrics, 0);
- assert_int_equal(stats->receptions, 1);
- assert_int_equal(stats->transmission_successes, 1);
- assert_int_equal(stats->transmission_failures, 0);
- assert_int_equal(stats->data_lost_events, 0);
- assert_int_equal(stats->lost_bytes, 0);
- assert_int_equal(stats->reconnects, 0);
-
- free(connector_specific_config->credentials_file);
- free(connector_specific_config->project_id);
- free(connector_specific_config->topic_id);
-}
-#endif // ENABLE_EXPORTING_PUBSUB
-
-#if HAVE_MONGOC
-static void test_init_mongodb_instance(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
-
- struct mongodb_specific_config *connector_specific_config = callocz(1, sizeof(struct mongodb_specific_config));
- instance->config.connector_specific_config = connector_specific_config;
- connector_specific_config->database = strdupz("test_database");
- connector_specific_config->collection = strdupz("test_collection");
- instance->config.buffer_on_failures = 10;
-
- expect_function_call(__wrap_mongoc_init);
- expect_function_call(__wrap_mongoc_uri_new_with_error);
- expect_string(__wrap_mongoc_uri_new_with_error, uri_string, "localhost");
- expect_not_value(__wrap_mongoc_uri_new_with_error, error, NULL);
- will_return(__wrap_mongoc_uri_new_with_error, 0xf1);
-
- expect_function_call(__wrap_mongoc_uri_get_option_as_int32);
- expect_value(__wrap_mongoc_uri_get_option_as_int32, uri, 0xf1);
- expect_string(__wrap_mongoc_uri_get_option_as_int32, option, MONGOC_URI_SOCKETTIMEOUTMS);
- expect_value(__wrap_mongoc_uri_get_option_as_int32, fallback, 1000);
- will_return(__wrap_mongoc_uri_get_option_as_int32, 1000);
-
- expect_function_call(__wrap_mongoc_uri_set_option_as_int32);
- expect_value(__wrap_mongoc_uri_set_option_as_int32, uri, 0xf1);
- expect_string(__wrap_mongoc_uri_set_option_as_int32, option, MONGOC_URI_SOCKETTIMEOUTMS);
- expect_value(__wrap_mongoc_uri_set_option_as_int32, value, 1000);
- will_return(__wrap_mongoc_uri_set_option_as_int32, true);
-
- expect_function_call(__wrap_mongoc_client_new_from_uri);
- expect_value(__wrap_mongoc_client_new_from_uri, uri, 0xf1);
- will_return(__wrap_mongoc_client_new_from_uri, 0xf2);
-
- expect_function_call(__wrap_mongoc_client_set_appname);
- expect_value(__wrap_mongoc_client_set_appname, client, 0xf2);
- expect_string(__wrap_mongoc_client_set_appname, appname, "netdata");
- will_return(__wrap_mongoc_client_set_appname, true);
-
- expect_function_call(__wrap_mongoc_client_get_collection);
- expect_value(__wrap_mongoc_client_get_collection, client, 0xf2);
- expect_string(__wrap_mongoc_client_get_collection, db, "test_database");
- expect_string(__wrap_mongoc_client_get_collection, collection, "test_collection");
- will_return(__wrap_mongoc_client_get_collection, 0xf3);
-
- expect_function_call(__wrap_mongoc_uri_destroy);
- expect_value(__wrap_mongoc_uri_destroy, uri, 0xf1);
-
- assert_int_equal(init_mongodb_instance(instance), 0);
-
- assert_ptr_equal(instance->worker, mongodb_connector_worker);
- assert_ptr_equal(instance->start_batch_formatting, NULL);
- assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
- assert_ptr_equal(instance->start_chart_formatting, NULL);
- assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
- assert_ptr_equal(instance->end_chart_formatting, NULL);
- assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
- assert_ptr_equal(instance->end_batch_formatting, format_batch_mongodb);
- assert_ptr_equal(instance->prepare_header, NULL);
- assert_ptr_equal(instance->check_response, NULL);
-
- assert_ptr_not_equal(instance->buffer, NULL);
- buffer_free(instance->buffer);
-
- assert_ptr_not_equal(instance->connector_specific_data, NULL);
-
- struct mongodb_specific_data *connector_specific_data =
- (struct mongodb_specific_data *)instance->connector_specific_data;
- size_t number_of_buffers = 1;
- struct bson_buffer *current_buffer = connector_specific_data->first_buffer;
- while (current_buffer->next != connector_specific_data->first_buffer) {
- current_buffer = current_buffer->next;
- number_of_buffers++;
- if (number_of_buffers == (size_t)(instance->config.buffer_on_failures + 1)) {
- number_of_buffers = 0;
- break;
- }
- }
- assert_int_equal(number_of_buffers, 9);
-
- free(connector_specific_config->database);
- free(connector_specific_config->collection);
-}
-
-static void test_format_batch_mongodb(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
- struct stats *stats = &instance->stats;
-
- struct mongodb_specific_data *connector_specific_data = mallocz(sizeof(struct mongodb_specific_data));
- instance->connector_specific_data = (void *)connector_specific_data;
-
- struct bson_buffer *current_buffer = callocz(1, sizeof(struct bson_buffer));
- connector_specific_data->first_buffer = current_buffer;
- connector_specific_data->first_buffer->next = current_buffer;
- connector_specific_data->last_buffer = current_buffer;
-
- BUFFER *buffer = buffer_create(0, NULL);
- buffer_sprintf(buffer, "{ \"metric\": \"test_metric\" }\n");
- instance->buffer = buffer;
- stats->buffered_metrics = 1;
-
- assert_int_equal(format_batch_mongodb(instance), 0);
-
- assert_int_equal(connector_specific_data->last_buffer->documents_inserted, 1);
- assert_int_equal(buffer_strlen(buffer), 0);
-
- size_t len;
- char *str = bson_as_canonical_extended_json(connector_specific_data->last_buffer->insert[0], &len);
- assert_string_equal(str, "{ \"metric\" : \"test_metric\" }");
-
- freez(str);
- buffer_free(buffer);
-}
-
-static void test_mongodb_connector_worker(void **state)
-{
- struct engine *engine = *state;
- struct instance *instance = engine->instance_root;
-
- struct mongodb_specific_config *connector_specific_config = callocz(1, sizeof(struct mongodb_specific_config));
- instance->config.connector_specific_config = connector_specific_config;
- connector_specific_config->database = strdupz("test_database");
-
- struct mongodb_specific_data *connector_specific_data = callocz(1, sizeof(struct mongodb_specific_data));
- instance->connector_specific_data = (void *)connector_specific_data;
- connector_specific_config->collection = strdupz("test_collection");
-
- struct bson_buffer *buffer = callocz(1, sizeof(struct bson_buffer));
- buffer->documents_inserted = 1;
- connector_specific_data->first_buffer = buffer;
- connector_specific_data->first_buffer->next = buffer;
-
- connector_specific_data->first_buffer->insert = callocz(1, sizeof(bson_t *));
- bson_error_t bson_error;
- connector_specific_data->first_buffer->insert[0] =
- bson_new_from_json((const uint8_t *)"{ \"test_key\" : \"test_value\" }", -1, &bson_error);
-
- connector_specific_data->client = mongoc_client_new("mongodb://localhost");
- connector_specific_data->collection =
- __real_mongoc_client_get_collection(connector_specific_data->client, "test_database", "test_collection");
-
- expect_function_call(__wrap_mongoc_collection_insert_many);
- expect_value(__wrap_mongoc_collection_insert_many, collection, connector_specific_data->collection);
- expect_value(__wrap_mongoc_collection_insert_many, documents, connector_specific_data->first_buffer->insert);
- expect_value(__wrap_mongoc_collection_insert_many, n_documents, 1);
- expect_value(__wrap_mongoc_collection_insert_many, opts, NULL);
- expect_value(__wrap_mongoc_collection_insert_many, reply, NULL);
- expect_not_value(__wrap_mongoc_collection_insert_many, error, NULL);
- will_return(__wrap_mongoc_collection_insert_many, true);
-
- expect_function_call(__wrap_send_internal_metrics);
- expect_value(__wrap_send_internal_metrics, instance, instance);
- will_return(__wrap_send_internal_metrics, 0);
-
- mongodb_connector_worker(instance);
-
- assert_ptr_equal(connector_specific_data->first_buffer->insert, NULL);
- assert_int_equal(connector_specific_data->first_buffer->documents_inserted, 0);
- assert_ptr_equal(connector_specific_data->first_buffer, connector_specific_data->first_buffer->next);
-
- struct stats *stats = &instance->stats;
- assert_int_equal(stats->buffered_metrics, 0);
- assert_int_equal(stats->buffered_bytes, 0);
- assert_int_equal(stats->received_bytes, 0);
- assert_int_equal(stats->sent_bytes, 30);
- assert_int_equal(stats->sent_metrics, 1);
- assert_int_equal(stats->lost_metrics, 0);
- assert_int_equal(stats->receptions, 1);
- assert_int_equal(stats->transmission_successes, 1);
- assert_int_equal(stats->transmission_failures, 0);
- assert_int_equal(stats->data_lost_events, 0);
- assert_int_equal(stats->lost_bytes, 0);
- assert_int_equal(stats->reconnects, 0);
-
- free(connector_specific_config->database);
- free(connector_specific_config->collection);
-}
-#endif // HAVE_MONGOC
-
-int main(void)
-{
- const struct CMUnitTest tests[] = {
- cmocka_unit_test_setup_teardown(test_exporting_engine, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test(test_read_exporting_config),
- cmocka_unit_test_setup_teardown(test_init_connectors, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_init_graphite_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_init_json_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_init_opentsdb_telnet_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_init_opentsdb_http_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_mark_scheduled_instances, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_rrdhost_is_exportable, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_false_rrdhost_is_exportable, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_rrdset_is_exportable, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_false_rrdset_is_exportable, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_exporting_calculate_value_from_stored_data, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(test_prepare_buffers, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test(test_exporting_name_copy),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_collected_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_stored_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_collected_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_stored_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_collected_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_stored_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_collected_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_stored_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_exporting_discard_response, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_simple_connector_receive_response, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_simple_connector_send_buffer, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_simple_connector_worker, setup_initialized_engine, teardown_initialized_engine),
- };
-
- const struct CMUnitTest label_tests[] = {
- cmocka_unit_test(test_sanitize_json_string),
- cmocka_unit_test(test_sanitize_graphite_label_value),
- cmocka_unit_test(test_sanitize_opentsdb_label_value),
- cmocka_unit_test_setup_teardown(
- test_format_host_labels_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_host_labels_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_host_labels_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_host_labels_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(test_flush_host_labels, setup_initialized_engine, teardown_initialized_engine),
- };
-
- int test_res = cmocka_run_group_tests_name("exporting_engine", tests, NULL, NULL) +
- cmocka_run_group_tests_name("labels_in_exporting_engine", label_tests, NULL, NULL);
-
- const struct CMUnitTest internal_metrics_tests[] = {
- cmocka_unit_test_setup_teardown(test_create_main_rusage_chart, setup_rrdhost, teardown_rrdhost),
- cmocka_unit_test(test_send_main_rusage),
- cmocka_unit_test(test_send_internal_metrics),
- };
-
- test_res += cmocka_run_group_tests_name("internal_metrics", internal_metrics_tests, NULL, NULL);
-
- const struct CMUnitTest prometheus_web_api_tests[] = {
- cmocka_unit_test_setup_teardown(test_can_send_rrdset, setup_prometheus, teardown_prometheus),
- cmocka_unit_test_setup_teardown(test_prometheus_name_copy, setup_prometheus, teardown_prometheus),
- cmocka_unit_test_setup_teardown(test_prometheus_label_copy, setup_prometheus, teardown_prometheus),
- cmocka_unit_test_setup_teardown(test_prometheus_units_copy, setup_prometheus, teardown_prometheus),
- cmocka_unit_test_setup_teardown(
- test_format_host_labels_prometheus, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- rrd_stats_api_v1_charts_allmetrics_prometheus, setup_prometheus, teardown_prometheus),
- };
-
- test_res += cmocka_run_group_tests_name("prometheus_web_api", prometheus_web_api_tests, NULL, NULL);
-
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
- const struct CMUnitTest prometheus_remote_write_tests[] = {
- cmocka_unit_test_setup_teardown(
- test_init_prometheus_remote_write_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_prometheus_remote_write_prepare_header, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test(test_process_prometheus_remote_write_response),
- cmocka_unit_test_setup_teardown(
- test_format_host_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_dimension_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
- cmocka_unit_test_setup_teardown(
- test_format_batch_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
- };
-
- test_res += cmocka_run_group_tests_name(
- "prometheus_remote_write_exporting_connector", prometheus_remote_write_tests, NULL, NULL);
-#endif
-
-#if HAVE_KINESIS
- const struct CMUnitTest kinesis_tests[] = {
- cmocka_unit_test_setup_teardown(
- test_init_aws_kinesis_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_aws_kinesis_connector_worker, setup_initialized_engine, teardown_initialized_engine),
- };
-
- test_res += cmocka_run_group_tests_name("kinesis_exporting_connector", kinesis_tests, NULL, NULL);
-#endif
-
-#if ENABLE_EXPORTING_PUBSUB
- const struct CMUnitTest pubsub_tests[] = {
- cmocka_unit_test_setup_teardown(
- test_init_pubsub_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_pubsub_connector_worker, setup_initialized_engine, teardown_initialized_engine),
- };
-
- test_res += cmocka_run_group_tests_name("pubsub_exporting_connector", pubsub_tests, NULL, NULL);
-#endif
-
-#if HAVE_MONGOC
- const struct CMUnitTest mongodb_tests[] = {
- cmocka_unit_test_setup_teardown(
- test_init_mongodb_instance, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_format_batch_mongodb, setup_configured_engine, teardown_configured_engine),
- cmocka_unit_test_setup_teardown(
- test_mongodb_connector_worker, setup_configured_engine, teardown_configured_engine),
- };
-
- test_res += cmocka_run_group_tests_name("mongodb_exporting_connector", mongodb_tests, NULL, NULL);
-#endif
-
- return test_res;
-}
diff --git a/exporting/tests/test_exporting_engine.h b/exporting/tests/test_exporting_engine.h
deleted file mode 100644
index 24dac8630..000000000
--- a/exporting/tests/test_exporting_engine.h
+++ /dev/null
@@ -1,209 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef TEST_EXPORTING_ENGINE_H
-#define TEST_EXPORTING_ENGINE_H 1
-
-#include "libnetdata/libnetdata.h"
-#include "database/rrdvar.h"
-
-#include "exporting/exporting_engine.h"
-#include "exporting/graphite/graphite.h"
-#include "exporting/json/json.h"
-#include "exporting/opentsdb/opentsdb.h"
-
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
-#include "exporting/prometheus/remote_write/remote_write.h"
-#endif
-
-#if HAVE_KINESIS
-#include "exporting/aws_kinesis/aws_kinesis.h"
-#endif
-
-#if ENABLE_EXPORTING_PUBSUB
-#include "exporting/pubsub/pubsub.h"
-#endif
-
-#if HAVE_MONGOC
-#include "exporting/mongodb/mongodb.h"
-#endif
-
-#include <stdarg.h>
-#include <stddef.h>
-#include <setjmp.h>
-#include <stdint.h>
-
-#ifndef UNIT_TESTING
-#include <cmocka.h>
-#else
-#undef UNIT_TESTING
-#include <cmocka.h>
-#define UNIT_TESTING
-#endif
-
-#define MAX_LOG_LINE 1024
-extern char log_line[];
-
-// -----------------------------------------------------------------------
-// doubles for Netdata functions
-
-const char *__wrap_strdupz(const char *s);
-void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...);
-int __wrap_connect_to_one_of(
- const char *destination,
- int default_port,
- struct timeval *timeout,
- size_t *reconnects_counter,
- char *connected_to,
- size_t connected_to_size);
-time_t __mock_rrddim_query_oldest_time(STORAGE_METRIC_HANDLE *db_metric_handle);
-time_t __mock_rrddim_query_latest_time(STORAGE_METRIC_HANDLE *db_metric_handle);
-void __mock_rrddim_query_init(STORAGE_METRIC_HANDLE *db_metric_handle, struct rrddim_query_handle *handle, time_t start_time, time_t end_time);
-int __mock_rrddim_query_is_finished(struct rrddim_query_handle *handle);
-STORAGE_POINT __mock_rrddim_query_next_metric(struct rrddim_query_handle *handle);
-void __mock_rrddim_query_finalize(struct rrddim_query_handle *handle);
-
-// -----------------------------------------------------------------------
-// wraps for system functions
-
-void __wrap_uv_thread_create(uv_thread_t thread, void (*worker)(void *arg), void *arg);
-void __wrap_uv_mutex_lock(uv_mutex_t *mutex);
-void __wrap_uv_mutex_unlock(uv_mutex_t *mutex);
-void __wrap_uv_cond_signal(uv_cond_t *cond_var);
-void __wrap_uv_cond_wait(uv_cond_t *cond_var, uv_mutex_t *mutex);
-ssize_t __wrap_recv(int sockfd, void *buf, size_t len, int flags);
-ssize_t __wrap_send(int sockfd, const void *buf, size_t len, int flags);
-
-// -----------------------------------------------------------------------
-// doubles and originals for exporting engine functions
-
-struct engine *__real_read_exporting_config();
-struct engine *__wrap_read_exporting_config();
-struct engine *__mock_read_exporting_config();
-
-int __real_init_connectors(struct engine *engine);
-int __wrap_init_connectors(struct engine *engine);
-
-int __real_mark_scheduled_instances(struct engine *engine);
-int __wrap_mark_scheduled_instances(struct engine *engine);
-
-NETDATA_DOUBLE __real_exporting_calculate_value_from_stored_data(
- struct instance *instance,
- RRDDIM *rd,
- time_t *last_timestamp);
-NETDATA_DOUBLE __wrap_exporting_calculate_value_from_stored_data(
- struct instance *instance,
- RRDDIM *rd,
- time_t *last_timestamp);
-
-int __real_prepare_buffers(struct engine *engine);
-int __wrap_prepare_buffers(struct engine *engine);
-
-void __real_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
-void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
-
-void __real_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
-void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
-
-int __real_send_internal_metrics(struct instance *instance);
-int __wrap_send_internal_metrics(struct instance *instance);
-
-int __real_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
-int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
-
-int __real_rrdset_is_exportable(struct instance *instance, RRDSET *st);
-int __wrap_rrdset_is_exportable(struct instance *instance, RRDSET *st);
-
-int __mock_start_batch_formatting(struct instance *instance);
-int __mock_start_host_formatting(struct instance *instance, RRDHOST *host);
-int __mock_start_chart_formatting(struct instance *instance, RRDSET *st);
-int __mock_metric_formatting(struct instance *instance, RRDDIM *rd);
-int __mock_end_chart_formatting(struct instance *instance, RRDSET *st);
-int __mock_variables_formatting(struct instance *instance, RRDHOST *host);
-int __mock_end_host_formatting(struct instance *instance, RRDHOST *host);
-int __mock_end_batch_formatting(struct instance *instance);
-
-int __wrap_simple_connector_end_batch(struct instance *instance);
-
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
-void *__real_init_write_request();
-void *__wrap_init_write_request();
-
-void __real_add_host_info(
- void *write_request_p,
- const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
-void __wrap_add_host_info(
- void *write_request_p,
- const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
-
-void __real_add_label(void *write_request_p, char *key, char *value);
-void __wrap_add_label(void *write_request_p, char *key, char *value);
-
-void __real_add_metric(
- void *write_request_p,
- const char *name, const char *chart, const char *family, const char *dimension,
- const char *instance, const double value, const int64_t timestamp);
-void __wrap_add_metric(
- void *write_request_p,
- const char *name, const char *chart, const char *family, const char *dimension,
- const char *instance, const double value, const int64_t timestamp);
-#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
-
-#if HAVE_KINESIS
-void __wrap_aws_sdk_init();
-void __wrap_kinesis_init(
- void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
- const long timeout);
-void __wrap_kinesis_put_record(
- void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
- size_t data_len);
-int __wrap_kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes);
-#endif /* HAVE_KINESIS */
-
-#if ENABLE_EXPORTING_PUBSUB
-int __wrap_pubsub_init(
- void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
- const char *project_id, const char *topic_id);
-int __wrap_pubsub_add_message(void *pubsub_specific_data_p, char *data);
-int __wrap_pubsub_publish(
- void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes);
-int __wrap_pubsub_get_result(
- void *pubsub_specific_data_p, char *error_message,
- size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes);
-#endif /* ENABLE_EXPORTING_PUBSUB */
-
-#if HAVE_MONGOC
-void __wrap_mongoc_init();
-mongoc_uri_t *__wrap_mongoc_uri_new_with_error(const char *uri_string, bson_error_t *error);
-int32_t __wrap_mongoc_uri_get_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t fallback);
-bool __wrap_mongoc_uri_set_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t value);
-mongoc_client_t *__wrap_mongoc_client_new_from_uri(const mongoc_uri_t *uri);
-bool __wrap_mongoc_client_set_appname(mongoc_client_t *client, const char *appname);
-mongoc_collection_t *
-__wrap_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection);
-mongoc_collection_t *
-__real_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection);
-void __wrap_mongoc_uri_destroy(mongoc_uri_t *uri);
-bool __wrap_mongoc_collection_insert_many(
- mongoc_collection_t *collection,
- const bson_t **documents,
- size_t n_documents,
- const bson_t *opts,
- bson_t *reply,
- bson_error_t *error);
-#endif /* HAVE_MONGOC */
-
-// -----------------------------------------------------------------------
-// fixtures
-
-int setup_configured_engine(void **state);
-int teardown_configured_engine(void **state);
-int setup_rrdhost();
-int teardown_rrdhost();
-int setup_initialized_engine(void **state);
-int teardown_initialized_engine(void **state);
-int setup_prometheus(void **state);
-int teardown_prometheus(void **state);
-
-void init_connectors_in_tests(struct engine *engine);
-
-#endif /* TEST_EXPORTING_ENGINE_H */
diff --git a/exporting/TIMESCALE.md b/src/exporting/TIMESCALE.md
index 8ca61b75e..3bad28379 100644
--- a/exporting/TIMESCALE.md
+++ b/src/exporting/TIMESCALE.md
@@ -1,7 +1,7 @@
<!--
title: "Writing metrics to TimescaleDB"
description: "Send Netdata metrics to TimescaleDB for long-term archiving and further analysis."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/TIMESCALE.md"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/TIMESCALE.md"
sidebar_label: "Writing metrics to TimescaleDB"
learn_status: "Published"
learn_rel_path: "Integrations/Export"
diff --git a/exporting/WALKTHROUGH.md b/src/exporting/WALKTHROUGH.md
index 86be758e4..ce0ec672f 100644
--- a/exporting/WALKTHROUGH.md
+++ b/src/exporting/WALKTHROUGH.md
@@ -63,7 +63,7 @@ the following command in your container.
<!-- candidate for reuse -->
```sh
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --dont-wait
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --dont-wait
```
After the install completes you should be able to hit the Netdata dashboard at <http://localhost:19999/> (replace
@@ -217,7 +217,7 @@ the `chart` dimension. If you'd like you can combine the `chart` and `instance`
Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
This is the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
-page](https://github.com/netdata/netdata/blob/master/exporting/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
+page](https://github.com/netdata/netdata/blob/master/src/exporting/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter like so.
<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected> If you choose to use
this method you will need to use Prometheus's set of functions here: <https://prometheus.io/docs/querying/functions/> to
diff --git a/exporting/aws_kinesis/README.md b/src/exporting/aws_kinesis/README.md
index dbc98ac13..dbc98ac13 120000
--- a/exporting/aws_kinesis/README.md
+++ b/src/exporting/aws_kinesis/README.md
diff --git a/exporting/aws_kinesis/aws_kinesis.c b/src/exporting/aws_kinesis/aws_kinesis.c
index 498d9ee23..498d9ee23 100644
--- a/exporting/aws_kinesis/aws_kinesis.c
+++ b/src/exporting/aws_kinesis/aws_kinesis.c
diff --git a/exporting/aws_kinesis/aws_kinesis.h b/src/exporting/aws_kinesis/aws_kinesis.h
index d88a45861..d88a45861 100644
--- a/exporting/aws_kinesis/aws_kinesis.h
+++ b/src/exporting/aws_kinesis/aws_kinesis.h
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.cc b/src/exporting/aws_kinesis/aws_kinesis_put_record.cc
index 62c6b0301..62c6b0301 100644
--- a/exporting/aws_kinesis/aws_kinesis_put_record.cc
+++ b/src/exporting/aws_kinesis/aws_kinesis_put_record.cc
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.h b/src/exporting/aws_kinesis/aws_kinesis_put_record.h
index 321baf669..321baf669 100644
--- a/exporting/aws_kinesis/aws_kinesis_put_record.h
+++ b/src/exporting/aws_kinesis/aws_kinesis_put_record.h
diff --git a/exporting/aws_kinesis/integrations/aws_kinesis.md b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
index deff55be7..351d88495 100644
--- a/exporting/aws_kinesis/integrations/aws_kinesis.md
+++ b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/aws_kinesis/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/aws_kinesis/metadata.yaml"
sidebar_label: "AWS Kinesis"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -50,7 +50,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -76,7 +76,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/check_filters.c b/src/exporting/check_filters.c
index 19eecc9bc..19eecc9bc 100644
--- a/exporting/check_filters.c
+++ b/src/exporting/check_filters.c
diff --git a/exporting/clean_connectors.c b/src/exporting/clean_connectors.c
index c850c5ffa..c850c5ffa 100644
--- a/exporting/clean_connectors.c
+++ b/src/exporting/clean_connectors.c
diff --git a/exporting/exporting.conf b/src/exporting/exporting.conf
index c43b2af9e..c43b2af9e 100644
--- a/exporting/exporting.conf
+++ b/src/exporting/exporting.conf
diff --git a/exporting/exporting_engine.c b/src/exporting/exporting_engine.c
index f42a36e92..739c14baf 100644
--- a/exporting/exporting_engine.c
+++ b/src/exporting/exporting_engine.c
@@ -51,7 +51,7 @@ void analytics_exporting_connectors(BUFFER *b)
buffer_strcat(b, "OpenTSDBHTTP");
break;
case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
buffer_strcat(b, "PrometheusRemoteWrite");
#endif
break;
@@ -66,7 +66,7 @@ void analytics_exporting_connectors(BUFFER *b)
#endif
break;
case EXPORTING_CONNECTOR_TYPE_MONGODB:
-#if HAVE_MONGOC
+#ifdef HAVE_MONGOC
buffer_strcat(b, "MongoDB");
#endif
break;
@@ -95,7 +95,7 @@ static void exporting_clean_engine()
aws_sdk_shutdown();
#endif
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
if (engine->protocol_buffers_initialized)
protocol_buffers_shutdown();
#endif
@@ -184,7 +184,9 @@ void *exporting_main(void *ptr)
if (init_connectors(engine) != 0) {
netdata_log_error("EXPORTING: cannot initialize exporting connectors");
- send_statistics("EXPORTING_START", "FAIL", "-");
+
+ analytics_statistic_t statistic = { "EXPORTING_START", "FAIL", "-" };
+ analytics_statistic_send(&statistic);
goto cleanup;
}
diff --git a/exporting/exporting_engine.h b/src/exporting/exporting_engine.h
index fb09b771a..beaa0ba87 100644
--- a/exporting/exporting_engine.h
+++ b/src/exporting/exporting_engine.h
@@ -311,7 +311,7 @@ static inline void disable_instance(struct instance *instance)
#include "exporting/prometheus/prometheus.h"
#include "exporting/opentsdb/opentsdb.h"
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
#include "exporting/prometheus/remote_write/remote_write.h"
#endif
diff --git a/exporting/graphite/README.md b/src/exporting/graphite/README.md
index 15f360d17..15f360d17 120000
--- a/exporting/graphite/README.md
+++ b/src/exporting/graphite/README.md
diff --git a/exporting/graphite/graphite.c b/src/exporting/graphite/graphite.c
index 254db982e..9edde4503 100644
--- a/exporting/graphite/graphite.c
+++ b/src/exporting/graphite/graphite.c
@@ -133,13 +133,11 @@ int format_dimension_collected_graphite_plaintext(struct instance *instance, RRD
buffer_sprintf(
instance->buffer,
- "%s.%s.%s.%s%s%s%s " COLLECTED_NUMBER_FORMAT " %llu\n",
+ "%s.%s.%s.%s%s " COLLECTED_NUMBER_FORMAT " %llu\n",
instance->config.prefix,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
chart_name,
dimension_name,
- (host->tags) ? ";" : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "",
rd->collector.last_collected_value,
(unsigned long long)rd->collector.last_collected_time.tv_sec);
@@ -179,13 +177,11 @@ int format_dimension_stored_graphite_plaintext(struct instance *instance, RRDDIM
buffer_sprintf(
instance->buffer,
- "%s.%s.%s.%s%s%s%s " NETDATA_DOUBLE_FORMAT " %llu\n",
+ "%s.%s.%s.%s%s " NETDATA_DOUBLE_FORMAT " %llu\n",
instance->config.prefix,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
chart_name,
dimension_name,
- (host->tags) ? ";" : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "",
value,
(unsigned long long)last_t);
diff --git a/exporting/graphite/graphite.h b/src/exporting/graphite/graphite.h
index 79f87e46e..79f87e46e 100644
--- a/exporting/graphite/graphite.h
+++ b/src/exporting/graphite/graphite.h
diff --git a/exporting/graphite/integrations/blueflood.md b/src/exporting/graphite/integrations/blueflood.md
index a4c3c9793..1a6a21473 100644
--- a/exporting/graphite/integrations/blueflood.md
+++ b/src/exporting/graphite/integrations/blueflood.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/graphite/integrations/blueflood.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/graphite/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/integrations/blueflood.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/metadata.yaml"
sidebar_label: "Blueflood"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/graphite/integrations/graphite.md b/src/exporting/graphite/integrations/graphite.md
index fec988027..05ebc4dae 100644
--- a/exporting/graphite/integrations/graphite.md
+++ b/src/exporting/graphite/integrations/graphite.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/graphite/integrations/graphite.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/graphite/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/integrations/graphite.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/metadata.yaml"
sidebar_label: "Graphite"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/graphite/integrations/kairosdb.md b/src/exporting/graphite/integrations/kairosdb.md
index e35d2497c..1227203d5 100644
--- a/exporting/graphite/integrations/kairosdb.md
+++ b/src/exporting/graphite/integrations/kairosdb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/graphite/integrations/kairosdb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/graphite/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/integrations/kairosdb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/metadata.yaml"
sidebar_label: "KairosDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/init_connectors.c b/src/exporting/init_connectors.c
index 5167a68c2..165046f67 100644
--- a/exporting/init_connectors.c
+++ b/src/exporting/init_connectors.c
@@ -5,7 +5,7 @@
#include "json/json.h"
#include "opentsdb/opentsdb.h"
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
#include "prometheus/remote_write/remote_write.h"
#endif
@@ -13,11 +13,11 @@
#include "aws_kinesis/aws_kinesis.h"
#endif
-#if ENABLE_EXPORTING_PUBSUB
+#ifdef ENABLE_EXPORTING_PUBSUB
#include "pubsub/pubsub.h"
#endif
-#if HAVE_MONGOC
+#ifdef HAVE_MONGOC
#include "mongodb/mongodb.h"
#endif
@@ -61,7 +61,7 @@ int init_connectors(struct engine *engine)
return 1;
break;
case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
if (init_prometheus_remote_write_instance(instance) != 0)
return 1;
#endif
@@ -79,7 +79,7 @@ int init_connectors(struct engine *engine)
#endif
break;
case EXPORTING_CONNECTOR_TYPE_MONGODB:
-#if HAVE_MONGOC
+#ifdef HAVE_MONGOC
if (init_mongodb_instance(instance) != 0)
return 1;
#endif
@@ -99,7 +99,8 @@ int init_connectors(struct engine *engine)
snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "EXPORTING-%zu", instance->index);
uv_thread_set_name_np(instance->thread, threadname);
- send_statistics("EXPORTING_START", "OK", instance->config.type_name);
+ analytics_statistic_t statistic = { "EXPORTING_START", "OK", instance->config.type_name };
+ analytics_statistic_send(&statistic);
}
return 0;
diff --git a/exporting/json/README.md b/src/exporting/json/README.md
index 0a8793ca8..0a8793ca8 120000
--- a/exporting/json/README.md
+++ b/src/exporting/json/README.md
diff --git a/exporting/json/integrations/json.md b/src/exporting/json/integrations/json.md
index ab4699d99..94a8da386 100644
--- a/exporting/json/integrations/json.md
+++ b/src/exporting/json/integrations/json.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/json/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/json/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/json/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/json/metadata.yaml"
sidebar_label: "JSON"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -37,7 +37,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/json/json.h b/src/exporting/json/json.h
index d916263a9..d916263a9 100644
--- a/exporting/json/json.h
+++ b/src/exporting/json/json.h
diff --git a/exporting/mongodb/README.md b/src/exporting/mongodb/README.md
index a28253054..a28253054 120000
--- a/exporting/mongodb/README.md
+++ b/src/exporting/mongodb/README.md
diff --git a/exporting/mongodb/mongodb.c b/src/exporting/mongodb/mongodb.c
index c65f8d4cc..c65f8d4cc 100644
--- a/exporting/mongodb/mongodb.c
+++ b/src/exporting/mongodb/mongodb.c
diff --git a/exporting/mongodb/mongodb.h b/src/exporting/mongodb/mongodb.h
index f1867b288..f1867b288 100644
--- a/exporting/mongodb/mongodb.h
+++ b/src/exporting/mongodb/mongodb.h
diff --git a/exporting/nc-exporting.sh b/src/exporting/nc-exporting.sh
index 740f65d18..740f65d18 100755
--- a/exporting/nc-exporting.sh
+++ b/src/exporting/nc-exporting.sh
diff --git a/exporting/opentsdb/README.md b/src/exporting/opentsdb/README.md
index fef534603..fef534603 120000
--- a/exporting/opentsdb/README.md
+++ b/src/exporting/opentsdb/README.md
diff --git a/exporting/opentsdb/integrations/opentsdb.md b/src/exporting/opentsdb/integrations/opentsdb.md
index 1c3087e0e..d9b0c7d16 100644
--- a/exporting/opentsdb/integrations/opentsdb.md
+++ b/src/exporting/opentsdb/integrations/opentsdb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/opentsdb/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/opentsdb/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/opentsdb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/opentsdb/metadata.yaml"
sidebar_label: "OpenTSDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/opentsdb/opentsdb.c b/src/exporting/opentsdb/opentsdb.c
index ffccb5b22..41f8e200a 100644
--- a/exporting/opentsdb/opentsdb.c
+++ b/src/exporting/opentsdb/opentsdb.c
@@ -186,15 +186,13 @@ int format_dimension_collected_opentsdb_telnet(struct instance *instance, RRDDIM
buffer_sprintf(
instance->buffer,
- "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s%s%s\n",
+ "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s\n",
instance->config.prefix,
chart_name,
dimension_name,
(unsigned long long)rd->collector.last_collected_time.tv_sec,
rd->collector.last_collected_value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "");
return 0;
@@ -232,15 +230,13 @@ int format_dimension_stored_opentsdb_telnet(struct instance *instance, RRDDIM *r
buffer_sprintf(
instance->buffer,
- "put %s.%s.%s %llu " NETDATA_DOUBLE_FORMAT " host=%s%s%s%s\n",
+ "put %s.%s.%s %llu " NETDATA_DOUBLE_FORMAT " host=%s%s\n",
instance->config.prefix,
chart_name,
dimension_name,
(unsigned long long)last_t,
value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "");
return 0;
@@ -326,7 +322,7 @@ int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *
"\"timestamp\":%llu,"
"\"value\":"COLLECTED_NUMBER_FORMAT","
"\"tags\":{"
- "\"host\":\"%s%s%s\"%s"
+ "\"host\":\"%s\"%s"
"}"
"}",
instance->config.prefix,
@@ -335,8 +331,6 @@ int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *
(unsigned long long)rd->collector.last_collected_time.tv_sec,
rd->collector.last_collected_value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "");
return 0;
@@ -382,7 +376,7 @@ int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd)
"\"timestamp\":%llu,"
"\"value\":" NETDATA_DOUBLE_FORMAT ","
"\"tags\":{"
- "\"host\":\"%s%s%s\"%s"
+ "\"host\":\"%s\"%s"
"}"
"}",
instance->config.prefix,
@@ -391,8 +385,6 @@ int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd)
(unsigned long long)last_t,
value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "");
return 0;
diff --git a/exporting/opentsdb/opentsdb.h b/src/exporting/opentsdb/opentsdb.h
index b544ba8c1..b544ba8c1 100644
--- a/exporting/opentsdb/opentsdb.h
+++ b/src/exporting/opentsdb/opentsdb.h
diff --git a/exporting/process_data.c b/src/exporting/process_data.c
index c7792fa55..4adf4b587 100644
--- a/exporting/process_data.c
+++ b/src/exporting/process_data.c
@@ -77,8 +77,8 @@ NETDATA_DOUBLE exporting_calculate_value_from_stored_data(
time_t before = instance->before;
// find the edges of the rrd database for this chart
- time_t first_t = storage_engine_oldest_time_s(rd->tiers[0].backend, rd->tiers[0].db_metric_handle);
- time_t last_t = storage_engine_latest_time_s(rd->tiers[0].backend, rd->tiers[0].db_metric_handle);
+ time_t first_t = storage_engine_oldest_time_s(rd->tiers[0].seb, rd->tiers[0].smh);
+ time_t last_t = storage_engine_latest_time_s(rd->tiers[0].seb, rd->tiers[0].smh);
time_t update_every = st->update_every;
struct storage_engine_query_handle handle;
@@ -126,7 +126,7 @@ NETDATA_DOUBLE exporting_calculate_value_from_stored_data(
size_t counter = 0;
NETDATA_DOUBLE sum = 0;
- for (storage_engine_query_init(rd->tiers[0].backend, rd->tiers[0].db_metric_handle, &handle, after, before, STORAGE_PRIORITY_SYNCHRONOUS); !storage_engine_query_is_finished(&handle);) {
+ for (storage_engine_query_init(rd->tiers[0].seb, rd->tiers[0].smh, &handle, after, before, STORAGE_PRIORITY_SYNCHRONOUS); !storage_engine_query_is_finished(&handle);) {
STORAGE_POINT sp = storage_engine_query_next_metric(&handle);
points_read++;
diff --git a/exporting/prometheus/integrations/appoptics.md b/src/exporting/prometheus/integrations/appoptics.md
index 292933200..babbe740b 100644
--- a/exporting/prometheus/integrations/appoptics.md
+++ b/src/exporting/prometheus/integrations/appoptics.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/appoptics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/appoptics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "AppOptics"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/azure_data_explorer.md b/src/exporting/prometheus/integrations/azure_data_explorer.md
index aa8710aae..6e4d25be7 100644
--- a/exporting/prometheus/integrations/azure_data_explorer.md
+++ b/src/exporting/prometheus/integrations/azure_data_explorer.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/azure_data_explorer.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/azure_data_explorer.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Azure Data Explorer"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/azure_event_hub.md b/src/exporting/prometheus/integrations/azure_event_hub.md
index bc8a0c9e1..4fa39ccd6 100644
--- a/exporting/prometheus/integrations/azure_event_hub.md
+++ b/src/exporting/prometheus/integrations/azure_event_hub.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/azure_event_hub.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/azure_event_hub.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Azure Event Hub"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/chronix.md b/src/exporting/prometheus/integrations/chronix.md
index 9794a624c..159961483 100644
--- a/exporting/prometheus/integrations/chronix.md
+++ b/src/exporting/prometheus/integrations/chronix.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/chronix.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/chronix.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Chronix"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/cortex.md b/src/exporting/prometheus/integrations/cortex.md
index 784c62ce2..987d07a75 100644
--- a/exporting/prometheus/integrations/cortex.md
+++ b/src/exporting/prometheus/integrations/cortex.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/cortex.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/cortex.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Cortex"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/cratedb.md b/src/exporting/prometheus/integrations/cratedb.md
index 75a46391d..b57f9aa67 100644
--- a/exporting/prometheus/integrations/cratedb.md
+++ b/src/exporting/prometheus/integrations/cratedb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/cratedb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/cratedb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "CrateDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/gnocchi.md b/src/exporting/prometheus/integrations/gnocchi.md
index a61986c19..317e38b31 100644
--- a/exporting/prometheus/integrations/gnocchi.md
+++ b/src/exporting/prometheus/integrations/gnocchi.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/gnocchi.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/gnocchi.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Gnocchi"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/google_bigquery.md b/src/exporting/prometheus/integrations/google_bigquery.md
index aec0a9a5b..96c6002cc 100644
--- a/exporting/prometheus/integrations/google_bigquery.md
+++ b/src/exporting/prometheus/integrations/google_bigquery.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/google_bigquery.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/google_bigquery.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Google BigQuery"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/irondb.md b/src/exporting/prometheus/integrations/irondb.md
index 450f88339..52c5fc489 100644
--- a/exporting/prometheus/integrations/irondb.md
+++ b/src/exporting/prometheus/integrations/irondb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/irondb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/irondb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "IRONdb"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/m3db.md b/src/exporting/prometheus/integrations/m3db.md
index 689e8e851..4b7e8adc4 100644
--- a/exporting/prometheus/integrations/m3db.md
+++ b/src/exporting/prometheus/integrations/m3db.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/m3db.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/m3db.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "M3DB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/metricfire.md b/src/exporting/prometheus/integrations/metricfire.md
index 2d69e33f6..c925b51b8 100644
--- a/exporting/prometheus/integrations/metricfire.md
+++ b/src/exporting/prometheus/integrations/metricfire.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/metricfire.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/metricfire.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "MetricFire"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/prometheus_remote_write.md b/src/exporting/prometheus/integrations/prometheus_remote_write.md
index b9ce730ea..e33fc652d 100644
--- a/exporting/prometheus/integrations/prometheus_remote_write.md
+++ b/src/exporting/prometheus/integrations/prometheus_remote_write.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/prometheus_remote_write.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/prometheus_remote_write.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Prometheus Remote Write"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/quasardb.md b/src/exporting/prometheus/integrations/quasardb.md
index 48d2419e0..55e022600 100644
--- a/exporting/prometheus/integrations/quasardb.md
+++ b/src/exporting/prometheus/integrations/quasardb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/quasardb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/quasardb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "QuasarDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/splunk_signalfx.md b/src/exporting/prometheus/integrations/splunk_signalfx.md
index 324101b20..1293323f4 100644
--- a/exporting/prometheus/integrations/splunk_signalfx.md
+++ b/src/exporting/prometheus/integrations/splunk_signalfx.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/splunk_signalfx.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/splunk_signalfx.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Splunk SignalFx"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/thanos.md b/src/exporting/prometheus/integrations/thanos.md
index 77fe11595..271cc0075 100644
--- a/exporting/prometheus/integrations/thanos.md
+++ b/src/exporting/prometheus/integrations/thanos.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/thanos.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/thanos.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Thanos"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/tikv.md b/src/exporting/prometheus/integrations/tikv.md
index 656ee695b..d075d277c 100644
--- a/exporting/prometheus/integrations/tikv.md
+++ b/src/exporting/prometheus/integrations/tikv.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/tikv.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/tikv.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "TiKV"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/timescaledb.md b/src/exporting/prometheus/integrations/timescaledb.md
index 681a0a618..8067ccc50 100644
--- a/exporting/prometheus/integrations/timescaledb.md
+++ b/src/exporting/prometheus/integrations/timescaledb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/timescaledb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/timescaledb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "TimescaleDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/victoriametrics.md b/src/exporting/prometheus/integrations/victoriametrics.md
index 114aefc83..bc3f3a23a 100644
--- a/exporting/prometheus/integrations/victoriametrics.md
+++ b/src/exporting/prometheus/integrations/victoriametrics.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/victoriametrics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/victoriametrics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "VictoriaMetrics"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/vmware_aria.md b/src/exporting/prometheus/integrations/vmware_aria.md
index 493d3550c..e5fba3c23 100644
--- a/exporting/prometheus/integrations/vmware_aria.md
+++ b/src/exporting/prometheus/integrations/vmware_aria.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/vmware_aria.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/vmware_aria.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "VMware Aria"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/wavefront.md b/src/exporting/prometheus/integrations/wavefront.md
index a6bab0566..80559efb7 100644
--- a/exporting/prometheus/integrations/wavefront.md
+++ b/src/exporting/prometheus/integrations/wavefront.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/wavefront.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/wavefront.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Wavefront"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/prometheus.c b/src/exporting/prometheus/prometheus.c
index 6644e1799..037539572 100644
--- a/exporting/prometheus/prometheus.c
+++ b/src/exporting/prometheus/prometheus.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-#define EXPORTINGS_INTERNALS
#include "prometheus.h"
// ----------------------------------------------------------------------------
@@ -375,59 +374,55 @@ static int print_host_variables_callback(const DICTIONARY_ITEM *item __maybe_unu
struct host_variables_callback_options *opts = data;
- if (rrdvar_flags(rv) & (RRDVAR_FLAG_CUSTOM_HOST_VAR | RRDVAR_FLAG_CUSTOM_CHART_VAR)) {
- if (!opts->host_header_printed) {
- opts->host_header_printed = 1;
+ if (!opts->host_header_printed) {
+ opts->host_header_printed = 1;
- if (opts->output_options & PROMETHEUS_OUTPUT_HELP) {
- buffer_sprintf(opts->wb, "\n# COMMENT global host and chart variables\n");
- }
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP) {
+ buffer_sprintf(opts->wb, "\n# COMMENT global host and chart variables\n");
}
+ }
- NETDATA_DOUBLE value = rrdvar2number(rv);
- if (isnan(value) || isinf(value)) {
- if (opts->output_options & PROMETHEUS_OUTPUT_HELP)
- buffer_sprintf(
- opts->wb, "# COMMENT variable \"%s\" is %s. Skipped.\n", rrdvar_name(rv), (isnan(value)) ? "NAN" : "INF");
-
- return 0;
- }
+ NETDATA_DOUBLE value = rrdvar2number(rv);
+ if (isnan(value) || isinf(value)) {
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP)
+ buffer_sprintf(
+ opts->wb, "# COMMENT variable \"%s\" is %s. Skipped.\n", rrdvar_name(rv), (isnan(value)) ? "NAN" : "INF");
- char *label_pre = "";
- char *label_post = "";
- if (opts->labels && *opts->labels) {
- label_pre = "{";
- label_post = "}";
- }
+ return 0;
+ }
- prometheus_name_copy(opts->name, rrdvar_name(rv), sizeof(opts->name));
+ char *label_pre = "";
+ char *label_post = "";
+ if (opts->labels && *opts->labels) {
+ label_pre = "{";
+ label_post = "}";
+ }
- if (opts->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
- buffer_sprintf(
- opts->wb,
- "%s_%s%s%s%s " NETDATA_DOUBLE_FORMAT " %llu\n",
- opts->prefix,
- opts->name,
- label_pre,
- opts->labels,
- label_post,
- value,
- opts->now * 1000ULL);
- else
- buffer_sprintf(
- opts->wb,
- "%s_%s%s%s%s " NETDATA_DOUBLE_FORMAT "\n",
- opts->prefix,
- opts->name,
- label_pre,
- opts->labels,
- label_post,
- value);
+ prometheus_name_copy(opts->name, rrdvar_name(rv), sizeof(opts->name));
- return 1;
- }
+ if (opts->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " NETDATA_DOUBLE_FORMAT " %llu\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ opts->labels,
+ label_post,
+ value,
+ opts->now * 1000ULL);
+ else
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " NETDATA_DOUBLE_FORMAT "\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ opts->labels,
+ label_post,
+ value);
- return 0;
+ return 1;
}
struct gen_parameters {
@@ -527,6 +522,86 @@ static void generate_as_collected_prom_metric(BUFFER *wb,
buffer_sprintf(wb, "\n");
}
+static void prometheus_print_os_info(
+ BUFFER *wb,
+ RRDHOST *host,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ FILE *fp;
+ char filename[FILENAME_MAX + 1];
+ char buf[BUFSIZ + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/etc/os-release");
+ fp = fopen(filename, "r");
+ if (!fp) {
+ /* Fallback to lsb-release */
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/etc/lsb-release");
+ fp = fopen(filename, "r");
+ }
+ if (!fp) {
+ return;
+ }
+
+ buffer_sprintf(wb, "netdata_os_info{instance=\"%s\"", rrdhost_hostname(host));
+
+ while (fgets(buf, BUFSIZ, fp)) {
+ char *in, *sanitized;
+ char *key, *val;
+ int in_val_part = 0;
+
+ /* sanitize the line */
+ sanitized = in = buf;
+ in_val_part = 0;
+ while (*in && *in != '\n') {
+ if (!in_val_part) {
+ /* Only accepts alphabetic characters and '_'
+ * in key part */
+ if (isalpha(*in) || *in == '_') {
+ *(sanitized++) = tolower(*in);
+ } else if (*in == '=') {
+ in_val_part = 1;
+ *(sanitized++) = '=';
+ }
+ } else {
+ /* Don't accept special characters in
+ * value part */
+ switch (*in) {
+ case '"':
+ case '\'':
+ case '\r':
+ case '\t':
+ break;
+ default:
+ if (isprint(*in)) {
+ *(sanitized++) = *in;
+ }
+ }
+ }
+ in++;
+ }
+ /* Terminate the string */
+ *(sanitized++) = '\0';
+
+ /* Split key/val */
+ key = buf;
+ val = strchr(buf, '=');
+
+ /* If we have a key/value pair, add it as a label */
+ if (val) {
+ *val = '\0';
+ val++;
+ buffer_sprintf(wb, ",%s=\"%s\"", key, val);
+ }
+ }
+
+ /* Finish the line */
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, "} 1 %llu\n", now_realtime_usec() / USEC_PER_MS);
+ else
+ buffer_sprintf(wb, "} 1\n");
+
+ fclose(fp);
+}
/**
* Write metrics in Prometheus format to a buffer.
*
@@ -580,6 +655,9 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
if (instance->labels_buffer)
buffer_flush(instance->labels_buffer);
+ if (instance->config.options & EXPORTING_OPTION_SEND_AUTOMATIC_LABELS)
+ prometheus_print_os_info(wb, host, output_options);
+
// send custom variables set for the host
if (output_options & PROMETHEUS_OUTPUT_VARIABLES) {
diff --git a/exporting/prometheus/prometheus.h b/src/exporting/prometheus/prometheus.h
index e80b682ae..d7c1a4119 100644
--- a/exporting/prometheus/prometheus.h
+++ b/src/exporting/prometheus/prometheus.h
@@ -9,8 +9,6 @@
#define PROMETHEUS_LABELS_MAX 1024
#define PROMETHEUS_VARIABLE_MAX 256
-#define PROMETHEUS_LABELS_MAX_NUMBER 128
-
typedef enum prometheus_output_flags {
PROMETHEUS_OUTPUT_NONE = 0,
PROMETHEUS_OUTPUT_HELP = (1 << 0),
diff --git a/exporting/prometheus/remote_write/README.md b/src/exporting/prometheus/remote_write/README.md
index 8ca4673a6..8ca4673a6 120000
--- a/exporting/prometheus/remote_write/README.md
+++ b/src/exporting/prometheus/remote_write/README.md
diff --git a/exporting/prometheus/remote_write/remote_write.c b/src/exporting/prometheus/remote_write/remote_write.c
index ed431c9dc..b4b6f996b 100644
--- a/exporting/prometheus/remote_write/remote_write.c
+++ b/src/exporting/prometheus/remote_write/remote_write.c
@@ -330,24 +330,22 @@ static int format_variable_prometheus_remote_write_callback(const DICTIONARY_ITE
struct prometheus_remote_write_variables_callback_options *opts = data;
- if (rrdvar_flags(rv) & (RRDVAR_FLAG_CUSTOM_HOST_VAR | RRDVAR_FLAG_CUSTOM_CHART_VAR)) {
- RRDHOST *host = opts->host;
- struct instance *instance = opts->instance;
- struct simple_connector_data *simple_connector_data =
- (struct simple_connector_data *)instance->connector_specific_data;
- struct prometheus_remote_write_specific_data *connector_specific_data =
- (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+ RRDHOST *host = opts->host;
+ struct instance *instance = opts->instance;
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
- char name[PROMETHEUS_LABELS_MAX + 1];
- char *suffix = "";
+ char name[PROMETHEUS_LABELS_MAX + 1];
+ char *suffix = "";
- prometheus_name_copy(context, rrdvar_name(rv), PROMETHEUS_ELEMENT_MAX);
- snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
+ prometheus_name_copy(context, rrdvar_name(rv), PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
- NETDATA_DOUBLE value = rrdvar2number(rv);
- add_variable(connector_specific_data->write_request, name,
- (host == localhost) ? instance->config.hostname : rrdhost_hostname(host), value, opts->now / USEC_PER_MS);
- }
+ NETDATA_DOUBLE value = rrdvar2number(rv);
+ add_variable(connector_specific_data->write_request, name,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host), value, opts->now / USEC_PER_MS);
return 0;
}
diff --git a/exporting/prometheus/remote_write/remote_write.h b/src/exporting/prometheus/remote_write/remote_write.h
index d4e86494b..d4e86494b 100644
--- a/exporting/prometheus/remote_write/remote_write.h
+++ b/src/exporting/prometheus/remote_write/remote_write.h
diff --git a/exporting/prometheus/remote_write/remote_write.proto b/src/exporting/prometheus/remote_write/remote_write.proto
index dfde254e1..dfde254e1 100644
--- a/exporting/prometheus/remote_write/remote_write.proto
+++ b/src/exporting/prometheus/remote_write/remote_write.proto
diff --git a/exporting/prometheus/remote_write/remote_write_request.cc b/src/exporting/prometheus/remote_write/remote_write_request.cc
index a628082d1..a628082d1 100644
--- a/exporting/prometheus/remote_write/remote_write_request.cc
+++ b/src/exporting/prometheus/remote_write/remote_write_request.cc
diff --git a/exporting/prometheus/remote_write/remote_write_request.h b/src/exporting/prometheus/remote_write/remote_write_request.h
index b25370133..b25370133 100644
--- a/exporting/prometheus/remote_write/remote_write_request.h
+++ b/src/exporting/prometheus/remote_write/remote_write_request.h
diff --git a/exporting/pubsub/README.md b/src/exporting/pubsub/README.md
index 8633f1725..8633f1725 120000
--- a/exporting/pubsub/README.md
+++ b/src/exporting/pubsub/README.md
diff --git a/exporting/pubsub/integrations/google_cloud_pub_sub.md b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md
index c24833146..ce2ba2865 100644
--- a/exporting/pubsub/integrations/google_cloud_pub_sub.md
+++ b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/pubsub/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/pubsub/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/pubsub/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/pubsub/metadata.yaml"
sidebar_label: "Google Cloud Pub Sub"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -39,7 +39,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -64,7 +64,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/pubsub/pubsub.c b/src/exporting/pubsub/pubsub.c
index 4989160a4..4989160a4 100644
--- a/exporting/pubsub/pubsub.c
+++ b/src/exporting/pubsub/pubsub.c
diff --git a/exporting/pubsub/pubsub.h b/src/exporting/pubsub/pubsub.h
index 0bcb76f9b..0bcb76f9b 100644
--- a/exporting/pubsub/pubsub.h
+++ b/src/exporting/pubsub/pubsub.h
diff --git a/exporting/pubsub/pubsub_publish.cc b/src/exporting/pubsub/pubsub_publish.cc
index cc14154f8..cc14154f8 100644
--- a/exporting/pubsub/pubsub_publish.cc
+++ b/src/exporting/pubsub/pubsub_publish.cc
diff --git a/exporting/pubsub/pubsub_publish.h b/src/exporting/pubsub/pubsub_publish.h
index 567a262f0..567a262f0 100644
--- a/exporting/pubsub/pubsub_publish.h
+++ b/src/exporting/pubsub/pubsub_publish.h
diff --git a/exporting/read_config.c b/src/exporting/read_config.c
index cd8af6bf6..cd8af6bf6 100644
--- a/exporting/read_config.c
+++ b/src/exporting/read_config.c
diff --git a/exporting/sample-metadata.yaml b/src/exporting/sample-metadata.yaml
index 41a287aeb..41a287aeb 100644
--- a/exporting/sample-metadata.yaml
+++ b/src/exporting/sample-metadata.yaml
diff --git a/exporting/send_data.c b/src/exporting/send_data.c
index e8b8aaf60..187a6828a 100644
--- a/exporting/send_data.c
+++ b/src/exporting/send_data.c
@@ -31,7 +31,7 @@ static int exporting_tls_is_enabled(EXPORTING_CONNECTOR_TYPE type __maybe_unused
* @return Always returns 0.
*/
int exporting_discard_response(BUFFER *buffer, struct instance *instance) {
-#if NETDATA_INTERNAL_CHECKS
+#ifdef NETDATA_INTERNAL_CHECKS
char sample[1024];
const char *s = buffer_tostring(buffer);
char *d = sample, *e = &sample[sizeof(sample) - 1];
@@ -391,7 +391,7 @@ void simple_connector_worker(void *instance_p)
#endif
}
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
if (instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE)
clean_prometheus_remote_write(instance);
#endif
diff --git a/exporting/send_internal_metrics.c b/src/exporting/send_internal_metrics.c
index 677a57bbb..677a57bbb 100644
--- a/exporting/send_internal_metrics.c
+++ b/src/exporting/send_internal_metrics.c