path: root/exporting
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2021-02-07 11:45:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2021-02-07 11:45:55 +0000
commit     a8220ab2d293bb7f4b014b79d16b2fb05090fa93 (patch)
tree       77f0a30f016c0925cf7ee9292e644bba183c2774 /exporting
parent     Adding upstream version 1.19.0. (diff)
download   netdata-a8220ab2d293bb7f4b014b79d16b2fb05090fa93.tar.xz
           netdata-a8220ab2d293bb7f4b014b79d16b2fb05090fa93.zip
Adding upstream version 1.29.0. (tag: upstream/1.29.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'exporting')
-rw-r--r--  exporting/Makefile.am  29
-rw-r--r--  exporting/README.md  312
-rw-r--r--  exporting/TIMESCALE.md  69
-rw-r--r--  exporting/WALKTHROUGH.md  259
-rw-r--r--  exporting/aws_kinesis/Makefile.am  8
-rw-r--r--  exporting/aws_kinesis/README.md  58
-rw-r--r--  exporting/aws_kinesis/aws_kinesis.c  217
-rw-r--r--  exporting/aws_kinesis/aws_kinesis.h  16
-rw-r--r--  exporting/aws_kinesis/aws_kinesis_put_record.cc  151
-rw-r--r--  exporting/aws_kinesis/aws_kinesis_put_record.h  35
-rw-r--r--  exporting/check_filters.c  78
-rw-r--r--  exporting/clean_connectors.c  81
-rw-r--r--  exporting/exporting.conf  89
-rw-r--r--  exporting/exporting_engine.c  143
-rw-r--r--  exporting/exporting_engine.h  304
-rw-r--r--  exporting/graphite/Makefile.am  4
-rw-r--r--  exporting/graphite/README.md  30
-rw-r--r--  exporting/graphite/graphite.c  228
-rw-r--r--  exporting/graphite/graphite.h  18
-rw-r--r--  exporting/init_connectors.c  145
-rw-r--r--  exporting/json/Makefile.am  4
-rw-r--r--  exporting/json/README.md  30
-rw-r--r--  exporting/json/json.c  362
-rw-r--r--  exporting/json/json.h  21
-rw-r--r--  exporting/mongodb/Makefile.am  8
-rw-r--r--  exporting/mongodb/README.md  38
-rw-r--r--  exporting/mongodb/mongodb.c  387
-rw-r--r--  exporting/mongodb/mongodb.h  35
-rwxr-xr-x  exporting/nc-exporting.sh  158
-rw-r--r--  exporting/opentsdb/Makefile.am  8
-rw-r--r--  exporting/opentsdb/README.md  30
-rw-r--r--  exporting/opentsdb/opentsdb.c  422
-rw-r--r--  exporting/opentsdb/opentsdb.h  26
-rw-r--r--  exporting/process_data.c  426
-rw-r--r--  exporting/prometheus/Makefile.am  12
-rw-r--r--  exporting/prometheus/README.md  461
-rw-r--r--  exporting/prometheus/prometheus.c  916
-rw-r--r--  exporting/prometheus/prometheus.h  41
-rw-r--r--  exporting/prometheus/remote_write/Makefile.am  14
-rw-r--r--  exporting/prometheus/remote_write/README.md  51
-rw-r--r--  exporting/prometheus/remote_write/remote_write.c  345
-rw-r--r--  exporting/prometheus/remote_write/remote_write.h  25
-rw-r--r--  exporting/prometheus/remote_write/remote_write.proto  29
-rw-r--r--  exporting/prometheus/remote_write/remote_write_request.cc  186
-rw-r--r--  exporting/prometheus/remote_write/remote_write_request.h  33
-rw-r--r--  exporting/pubsub/Makefile.am  8
-rw-r--r--  exporting/pubsub/README.md  45
-rw-r--r--  exporting/pubsub/pubsub.c  194
-rw-r--r--  exporting/pubsub/pubsub.h  14
-rw-r--r--  exporting/pubsub/pubsub_publish.cc  258
-rw-r--r--  exporting/pubsub/pubsub_publish.h  37
-rw-r--r--  exporting/read_config.c  503
-rw-r--r--  exporting/send_data.c  440
-rw-r--r--  exporting/send_internal_metrics.c  172
-rw-r--r--  exporting/tests/Makefile.am  4
-rw-r--r--  exporting/tests/exporting_doubles.c  395
-rw-r--r--  exporting/tests/exporting_fixtures.c  163
-rw-r--r--  exporting/tests/netdata_doubles.c  247
-rw-r--r--  exporting/tests/system_doubles.c  61
-rw-r--r--  exporting/tests/test_exporting_engine.c  1939
-rw-r--r--  exporting/tests/test_exporting_engine.h  203
61 files changed, 11025 insertions, 0 deletions
diff --git a/exporting/Makefile.am b/exporting/Makefile.am
new file mode 100644
index 000000000..41fcac0bd
--- /dev/null
+++ b/exporting/Makefile.am
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ tests \
+ graphite \
+ json \
+ opentsdb \
+ prometheus \
+ aws_kinesis \
+ pubsub \
+ mongodb \
+ $(NULL)
+
+dist_libconfig_DATA = \
+ exporting.conf \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ TIMESCALE.md \
+ WALKTHROUGH.md \
+ $(NULL)
+
+dist_noinst_SCRIPTS = \
+ nc-exporting.sh \
+ $(NULL)
diff --git a/exporting/README.md b/exporting/README.md
new file mode 100644
index 000000000..933de0e07
--- /dev/null
+++ b/exporting/README.md
@@ -0,0 +1,312 @@
+<!--
+title: "Exporting engine reference"
+description: "With the exporting engine, you can archive your Netdata metrics to multiple external databases for long-term storage or further analysis."
+sidebar_label: Reference guide
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/README.md
+-->
+
+# Exporting engine reference
+
+Welcome to the exporting engine reference guide. This guide contains comprehensive information about enabling,
+configuring, and monitoring Netdata's exporting engine, which allows you to send metrics to external time-series
+databases.
+
+For a quick introduction to the exporting engine's features, read our doc on [exporting metrics to time-series
+databases](/docs/export/external-databases.md), or jump in to [enabling a connector](/docs/export/enable-connector.md).
+
+The exporting engine has a modular structure and supports metric exporting via multiple exporting connector instances at
+the same time. You can have different update intervals and filters configured for every exporting connector instance.
+
+When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
+restart its process_, not the entire [database of long-term metrics](/docs/store/change-metrics-storage.md).
+
+The exporting engine has its own configuration file, `exporting.conf`. The configuration is similar to that of the
+deprecated [backends](/backends/README.md#configuration) system. The most important difference is that the connector
+type is specified in a section name before a colon and the instance name after the colon. Also, you can't use
+`host tags` anymore. Set your labels using the [`[host labels]`](/docs/guides/using-host-labels.md) section in
+`netdata.conf`.
+
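+For example, labels that you previously set as host tags can be defined in `netdata.conf` like this (the label names
+and values below are only an illustration):
+
+```conf
+[host labels]
+    environment = production
+    location = rack-42
+```
+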
+Since Netdata collects thousands of metrics per server per second, sending everything as-is would easily congest any
+database server when several Netdata servers report to it. To avoid this, Netdata can resample metrics and send them
+at a lower frequency.
+
+So, although Netdata collects metrics every second, it can send averages or sums to the external database servers every
+X seconds (though it can also send them per second if you need it to).
+
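+For example, a hypothetical Graphite instance that resamples metrics to 10-second averages would use settings like
+these (all options are described under [Options](#options) below):
+
+```conf
+[graphite:resampled_instance]
+    enabled = yes
+    destination = localhost:2003
+    data source = average
+    update every = 10
+```
+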
+## Features
+
+1. The exporting engine uses a number of connectors to send Netdata metrics to external time-series databases. See our
+ [list of supported databases](/docs/export/external-databases.md#supported-databases) for information on which
+ connector to enable and configure for your database of choice.
+
+ - [**AWS Kinesis Data Streams**](/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
+ format.
+ - [**Google Cloud Pub/Sub Service**](/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
+ format.
+ - [**Graphite**](/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
+ `prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can
+ also be configured). Learn more in our guide to [export and visualize Netdata metrics in
+ Graphite](/docs/guides/export/export-netdata-metrics-graphite.md).
+ - [**JSON** document databases](/exporting/json/README.md)
+   - [**OpenTSDB**](/exporting/opentsdb/README.md): Uses plaintext or HTTP interfaces. Metrics are sent to
+     OpenTSDB as `prefix.chart.dimension` with tag `host=hostname`.
+ - [**MongoDB**](/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
+   - [**Prometheus**](/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
+     from a node using the Netdata API.
+ - [**Prometheus remote write**](/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
+ buffer encoding over HTTP. Supports many [storage
+ providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+ - [**TimescaleDB**](/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
+ Netdata client and writes them to a TimescaleDB table.
+
+2. Netdata can filter metrics (at the chart level) to send only a subset of the collected metrics.
+
+3. Netdata supports three modes of operation for all exporting connectors:
+
+ - `as-collected` sends to external databases the metrics as they are collected, in the units they are collected.
+ So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example,
+ to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
+
+ - `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
+ are sent as gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but
+ you will not be able to copy and paste queries from other sources to convert units. For example, CPU utilization
+ percentage is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage
+ to the external database.
+
+ - `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
+ database. So, if Netdata is configured to send data to the database every 10 seconds, the sum of the 10 values
+ shown on the Netdata charts will be used.
+
+   Time-series databases recommend collecting the raw values (`as-collected`). If you plan to invest in building your
+   monitoring around a time-series database and you already know (or will invest in learning) how to convert units
+   and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`.
+
+   If, on the other hand, you just need long-term archiving of Netdata metrics and you plan to mainly work with
+   Netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot
+   simpler. Furthermore, if you use `average`, the charts shown in the external service will match exactly what you
+   see in Netdata, which is not necessarily true for the other modes of operation.
+
+4. This code is smart enough not to slow down Netdata, regardless of the speed of the external database server. Keep
+   in mind, though, that many exporting connector instances can consume a lot of CPU resources if they run their
+   batches at the same time. You can set a different update interval for every exporting connector instance, but even
+   in that case they can occasionally synchronize their batches for a moment.
+
+## Configuration
+
+Here are the configuration blocks for every supported connector. Your current `exporting.conf` file may look a little
+different.
+
+You can configure each connector individually using the available [options](#options). The
+`[graphite:my_graphite_instance]` block contains examples of some of these additional options in action.
+
+```conf
+[exporting:global]
+ enabled = yes
+ send configured labels = no
+ send automatic labels = no
+ update every = 10
+
+[prometheus:exporter]
+ send names instead of ids = yes
+ send configured labels = yes
+    send automatic labels = no
+ send charts matching = *
+ send hosts matching = localhost *
+ prefix = netdata
+
+[graphite:my_graphite_instance]
+ enabled = yes
+ destination = localhost:2003
+ data source = average
+ prefix = Netdata
+ hostname = my-name
+ update every = 10
+ buffer on failures = 10
+ timeout ms = 20000
+ send charts matching = *
+ send hosts matching = localhost *
+ send names instead of ids = yes
+ send configured labels = yes
+ send automatic labels = yes
+
+[prometheus_remote_write:my_prometheus_remote_write_instance]
+ enabled = yes
+ destination = localhost
+ remote write URL path = /receive
+
+[kinesis:my_kinesis_instance]
+ enabled = yes
+ destination = us-east-1
+ stream name = netdata
+ aws_access_key_id = my_access_key_id
+ aws_secret_access_key = my_aws_secret_access_key
+
+[pubsub:my_pubsub_instance]
+ enabled = yes
+ destination = pubsub.googleapis.com
+ credentials file = /etc/netdata/pubsub_credentials.json
+ project id = my_project
+ topic id = my_topic
+
+[mongodb:my_mongodb_instance]
+ enabled = yes
+ destination = localhost
+ database = my_database
+ collection = my_collection
+
+[json:my_json_instance]
+ enabled = yes
+ destination = localhost:5448
+
+[opentsdb:my_opentsdb_plaintext_instance]
+ enabled = yes
+ destination = localhost:4242
+
+[opentsdb:http:my_opentsdb_http_instance]
+ enabled = yes
+ destination = localhost:4242
+
+[opentsdb:https:my_opentsdb_https_instance]
+ enabled = yes
+ destination = localhost:8082
+```
+
+### Sections
+
+- `[exporting:global]` is a section where you can set your defaults for all exporting connectors.
+- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
+ `http://NODE:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`).
+- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
+ - `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
+ prometheus_remote_write | json | kinesis | pubsub | mongodb. For graphite, opentsdb,
+ json, and prometheus_remote_write connectors you can also use `:http` or `:https` modifiers
+ (e.g.: `opentsdb:https`).
+  - `name` can be any instance name you choose.
+
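+For example, two instances of the same connector type can run side by side, and a modifier can be appended to the
+type (the instance names and destinations here are only an illustration):
+
+```conf
+[opentsdb:production_instance]
+    destination = first-opentsdb-host:4242
+
+[opentsdb:https:staging_instance]
+    destination = second-opentsdb-host:8082
+```
+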
+### Options
+
+Configure individual connectors and override any global settings with the following options.
+
+- `enabled = yes | no`, enables or disables an exporting connector instance
+
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
+ ports to connect to. Netdata will use the **first available** to send the metrics.
+
+  The format of each item in this list is `[PROTOCOL:]IP[:PORT]`.
+
+  `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only protocol supported by the current exporting engine.
+
+  `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you need to enclose the IP in `[]` to
+  separate it from the port.
+
+  `PORT` can be a number or a service name. If omitted, the default port for the exporting connector will be used
+ (graphite = 2003, opentsdb = 4242).
+
+ Example IPv4:
+
+```conf
+ destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+```
+
+ Example IPv6 and IPv4 together:
+
+```conf
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+ Netdata also ships `nc-exporting.sh`, a script that can be used as a fallback exporting connector to save the
+ metrics to disk and push them to the time-series database when it becomes available again. It can also be used to
+ monitor / trace / debug the metrics Netdata generates.
+
+ For the Kinesis exporting connector `destination` should be set to an AWS region (for example, `us-east-1`).
+
+ For the MongoDB exporting connector `destination` should be set to a
+ [MongoDB URI](https://docs.mongodb.com/manual/reference/connection-string/).
+
+ For the Pub/Sub exporting connector `destination` can be set to a specific service endpoint.
+
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
+ be sent to the external database.
+
+- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default this
+ is `[global].hostname`.
+
+- `prefix = Netdata`, is the prefix to add to all metrics.
+
+- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
+ randomness to this number, to prevent stressing the external server when many Netdata servers send data to the same
+ database. This randomness does not affect the quality of the data, only the time they are sent.
+
+- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data,
+ when the external database server is not available. If the server fails to receive the data after that many
+ failures, data loss on the connector instance is expected (Netdata will also log it).
+
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
+ By default this is `2 * update_every * 1000`.
+
+- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
+ of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
+ `localhost`), allowing us to filter which hosts will be sent to the external database when this Netdata is a central
+ Netdata aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named
+ `*db*` except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first
+ pattern matching the hostname will be used - positive or negative).
+
+- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
+ within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
+ gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
+ apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
+ positive or negative).
+
+- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system
+ and names are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several
+ cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+- `send configured labels = yes | no` controls if labels defined in the `[host labels]` section in `netdata.conf`
+  should be sent to the external database.
+
+- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`,
+  should be sent to the external database.
+
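+Putting the filtering options above together, a hypothetical instance that exports only `apps.*` charts (except those
+ending in `*reads`) from hosts matching `*db*` (except children) could be configured like this:
+
+```conf
+[graphite:filtered_instance]
+    enabled = yes
+    destination = localhost:2003
+    send hosts matching = !*child* *db*
+    send charts matching = !*reads apps.*
+```
+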
+> Starting from Netdata v1.20 the host tags (defined in the `[backend]` section of `netdata.conf`) are parsed in
+> accordance with a configured backend type and stored as host labels so that they can be reused in API responses and
+> exporting connectors. The parsing is supported for graphite, json, opentsdb, and prometheus (default) backend types.
+> You can check how the host tags were parsed using the `/api/v1/info` API call. But keep in mind that the backends
+> subsystem is deprecated and will be removed soon. Please move your existing tags to the `[host labels]` section.
+
+## HTTPS
+
+Netdata can send metrics to external databases using the TLS/SSL protocol. Unfortunately, some of
+them do not support encrypted connections, so you will have to configure a reverse proxy to enable
+HTTPS communication between Netdata and an external database. You can set up a reverse proxy with
+[Nginx](/docs/Running-behind-nginx.md).
+
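+As a minimal sketch of such a setup (the hostnames, port, and certificate paths below are assumptions; adapt them to
+your environment), an Nginx `stream` proxy can terminate TLS in front of a plaintext Graphite port:
+
+```conf
+# top-level stream block in nginx.conf
+stream {
+    server {
+        # Netdata connects here over TLS ...
+        listen 2004 ssl;
+        ssl_certificate     /etc/nginx/ssl/proxy.crt;
+        ssl_certificate_key /etc/nginx/ssl/proxy.key;
+
+        # ... and the proxy forwards plaintext traffic to the database
+        proxy_pass graphite-host:2003;
+    }
+}
+```
+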
+## Exporting engine monitoring
+
+Netdata creates several charts in the dashboard, under the **Netdata Monitoring** section, to help you monitor the health
+and performance of the exporting engine itself:
+
+1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
+ external database server.
+
+2. **Exporting data size**, the amount of data (in KB) Netdata added to the buffer.
+
+3. **Exporting operations**, the number of operations performed by Netdata.
+
+4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread that is responsible for sending
+ the metrics to the external database server.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
+
+## Exporting engine alarms
+
+Netdata adds 3 alarms:
+
+1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
+2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
+3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/TIMESCALE.md b/exporting/TIMESCALE.md
new file mode 100644
index 000000000..c98003ed4
--- /dev/null
+++ b/exporting/TIMESCALE.md
@@ -0,0 +1,69 @@
+<!--
+title: "Writing metrics to TimescaleDB"
+description: "Send Netdata metrics to TimescaleDB for long-term archiving and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/TIMESCALE.md
+sidebar_label: Writing metrics to TimescaleDB
+-->
+
+# Writing metrics to TimescaleDB
+
+Thanks to Netdata's community of developers and system administrators, and Mahlon Smith
+([GitHub](https://github.com/mahlonsmith)/[Website](http://www.martini.nu/)) in particular, Netdata now supports
+archiving metrics directly to TimescaleDB.
+
+What's TimescaleDB? Here's how their team defines the project on their [GitHub page](https://github.com/timescale/timescaledb):
+
+> TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is engineered up from
+> PostgreSQL, providing automatic partitioning across time and space (partitioning key), as well as full SQL support.
+
+## Quickstart
+
+To get started archiving metrics to TimescaleDB right away, check out Mahlon's [`netdata-timescale-relay`
+repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub. Please be aware that the backends
+subsystem is deprecated and your Netdata configuration should be moved to the new `exporting.conf` configuration file. Use
+```conf
+[json:my_instance]
+```
+in `exporting.conf` instead of
+```conf
+[backend]
+ type = json
+```
+in `netdata.conf`.
+
+This small program takes JSON streams from a Netdata client and writes them to a PostgreSQL (aka TimescaleDB) table.
+You'll run this program in parallel with Netdata, and after a short [configuration
+process](https://github.com/mahlonsmith/netdata-timescale-relay#configuration), your metrics should start populating
+TimescaleDB.
+
+Finally, another member of Netdata's community has built a project that quickly launches Netdata, TimescaleDB, and
+Grafana in easy-to-manage Docker containers. Rune Juhl Jacobsen's
+[project](https://github.com/runejuhl/grafana-timescaledb) uses a `Makefile` to create everything, which makes it
+perfect for testing and experimentation.
+
+## Netdata&#8596;TimescaleDB in action
+
+Aside from creating incredible contributions to Netdata, Mahlon works at [LAIKA](https://www.laika.com/), an
+Oregon-based animation studio that's helped create acclaimed films like _Coraline_ and _Kubo and the Two Strings_.
+
+As part of his work to maintain the company's infrastructure of render farms, workstations, and virtual machines, he's
+using Netdata, `netdata-timescale-relay`, and TimescaleDB to store Netdata metrics alongside other data from other
+sources.
+
+> LAIKA is a long-time PostgreSQL user and added TimescaleDB to their infrastructure in 2018 to help manage and store
+> their IT metrics and time-series data. So far, the tool has been in production at LAIKA for over a year and helps them
+> with their use case of time-based logging, where they record over 8 million metrics an hour for netdata content alone.
+
+By archiving Netdata metrics to a database like TimescaleDB, LAIKA can consolidate metrics data from distributed
+machines efficiently. Mahlon can then correlate Netdata metrics with other sources directly in TimescaleDB.
+
+And, because LAIKA will soon be storing years' worth of Netdata metrics data in TimescaleDB, they can analyze long-term
+metrics as their films move from concept to final cut.
+
+Read the full blog post from LAIKA at the [TimescaleDB
+blog](https://blog.timescale.com/blog/writing-it-metrics-from-netdata-to-timescaledb/amp/).
+
+Thank you to Mahlon, Rune, TimescaleDB, and the members of the Netdata community that requested and then built this
+exporting connection between Netdata and TimescaleDB!
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2FTIMESCALE&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/WALKTHROUGH.md b/exporting/WALKTHROUGH.md
new file mode 100644
index 000000000..ac1712916
--- /dev/null
+++ b/exporting/WALKTHROUGH.md
@@ -0,0 +1,259 @@
+<!--
+title: "Exporting to Netdata, Prometheus, Grafana stack"
+description: "Using Netdata in conjunction with Prometheus and Grafana."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/WALKTHROUGH.md
+sidebar_label: Netdata, Prometheus, Grafana stack
+-->
+
+# Netdata, Prometheus, Grafana stack
+
+## Intro
+
+In this article I will walk you through the basics of getting Netdata, Prometheus, and Grafana all working together to
+monitor your application servers. This article uses Docker on your local workstation. We will be working
+with Docker in an ad-hoc way, launching containers that run `/bin/bash` and attaching a TTY to them. I use Docker here
+in a purely academic fashion and do not condone running Netdata in a container. I picked this method so individuals
+without cloud accounts or access to VMs can try this out, and for its speed of deployment.
+
+## Why Netdata, Prometheus, and Grafana
+
+Some time ago I was introduced to Netdata by a coworker. We were attempting to troubleshoot Python code which seemed to
+be bottlenecked. I was instantly impressed by the number of metrics Netdata exposes to you. I quickly added Netdata to
+my set of go-to tools when troubleshooting systems performance.
+
+Some time later I was introduced to Prometheus. Prometheus is a monitoring application which flips the normal
+architecture around and polls REST endpoints for its metrics. This architectural change greatly simplifies and decreases
+the time necessary to begin monitoring your applications. Compared to current monitoring solutions, the time spent on
+designing the infrastructure is greatly reduced. Running a single Prometheus server per application becomes feasible
+with the help of Grafana.
+
+Grafana has been the go-to graphing tool for… some time now. It's awesome; anyone that has used it knows it's awesome.
+We can point Grafana at Prometheus and use Prometheus as a data source. This allows a pretty simple overall monitoring
+architecture: Install Netdata on your application servers, point Prometheus at Netdata, and then point Grafana at
+Prometheus.
+
+I'm omitting an important ingredient in this stack in order to keep this tutorial simple and that is service discovery.
+My personal preference is to use Consul. Prometheus can plug into Consul and automatically begin to scrape new hosts
+that register a Netdata client with Consul.
+
+At the end of this tutorial you will understand how each technology fits together to create a modern monitoring stack.
+This stack will offer you visibility into your application and systems performance.
+
+## Getting Started - Netdata
+
+To begin let's create our container which we will install Netdata on. We need to run a container, forward the necessary
+port that Netdata listens on, and attach a tty so we can interact with the bash shell on the container. But before we do
+this we want name resolution between the two containers to work. In order to accomplish this we will create a
+user-defined network and attach both containers to this network. The first command we should run is:
+
+```sh
+docker network create --driver bridge netdata-tutorial
+```
+
+With this user-defined network created, we can now launch the container we will install Netdata on and point it to this
+network.
+
+```sh
+docker run -it --name netdata --hostname netdata --network=netdata-tutorial -p 19999:19999 centos:latest '/bin/bash'
+```
+
+This command creates an interactive tty session (`-it`), gives the container both a name in relation to the docker
+daemon and a hostname (this is so you know what container is which when working in the shells and docker maps hostname
+resolution to this container), forwards the local port 19999 to the container's port 19999 (`-p 19999:19999`), sets the
+command to run (`/bin/bash`) and then chooses the base container image (`centos:latest`). After running this you should
+be sitting inside the shell of the container.
+
+After we have entered the shell we can install Netdata. This process could not be easier. If you take a look at [this
+link](/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
+any issues with these one-liners and their bootstrapping scripts so far (if you run into anything, do share). Run
+the following command in your container.
+
+```sh
+bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait
+```
+
+After the install completes you should be able to hit the Netdata dashboard at <http://localhost:19999/> (replace
+localhost if you're doing this on a VM or have the docker container hosted on a machine not on your local system). If
+this is your first time using Netdata I suggest you take a look around. The amount of time I've spent digging through
+`/proc` and calculating my own metrics has been greatly reduced by this tool. Take it all in.
+
+Next I want to draw your attention to a particular endpoint. Navigate to
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> in your browser. This is the endpoint which
+publishes all the metrics in a format which Prometheus understands. Let's take a look at one of these metrics.
+`netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000` This
+metric represents several things which I will go into in more detail in the section on Prometheus. For now, understand
+that this metric, `netdata_system_cpu_percentage_average`, has several labels (`chart`, `family`, `dimension`). This
+corresponds to the first CPU chart you see on the Netdata dashboard.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%204.00.45%20PM.png)
+
+This CHART is called `system.cpu`, the FAMILY is `cpu`, and the DIMENSION we are observing is `system`. You can begin to
+draw links between the charts in Netdata and the Prometheus metrics format in this manner.
+
+## Prometheus
+
+We will be installing Prometheus in a container for the purpose of demonstration. While Prometheus does have an official
+container image, I would like to walk through the install process and setup on a fresh container. This will allow anyone
+reading to migrate this tutorial to a VM or server of any sort.
+
+Let's start another container in the same fashion as we did the Netdata container.
+
+```sh
+docker run -it --name prometheus --hostname prometheus \
+  --network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
+```
+
+This should drop you into a shell once again. Once there quickly install your favorite editor as we will be editing
+files later in this tutorial.
+
+```sh
+yum install vim -y
+```
+
+Prometheus provides a tarball of their latest stable versions [here](https://prometheus.io/download/).
+
+Let's download the latest version and install into your container.
+
+```sh
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
+
+mkdir /opt/prometheus
+
+sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+This should get Prometheus installed into the container. Let's test that we can run Prometheus and connect to its web
+interface.
+
+```sh
+/opt/prometheus/prometheus
+```
+
+Now attempt to go to <http://localhost:9090/>. You should be presented with the Prometheus homepage. This is a good
+point to talk about Prometheus's data model, which can be viewed here: <https://prometheus.io/docs/concepts/data_model/>.
+As explained there, we have two key elements in Prometheus metrics. We have the _metric_ and its _labels_. Labels allow for
+granularity between metrics. Let's use our previous example to further explain.
+
+```conf
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000
+```
+
+Here our metric is `netdata_system_cpu_percentage_average` and our labels are `chart`, `family`, and `dimension`. The
+last two values constitute the actual metric value for the metric type (gauge, counter, etc…). We can begin graphing
+system metrics with this information, but first we need to hook up Prometheus to poll Netdata stats.
+
+Let's move our attention to Prometheus's configuration. Prometheus gets its config from the file located (in our example)
+at `/opt/prometheus/prometheus.yml`. I won't spend an extensive amount of time going over the configuration values
+documented here: <https://prometheus.io/docs/operating/configuration/>. We will be adding a new job under
+`scrape_configs`. Let's make the `scrape_configs` section look like this (we can use the DNS name `netdata` thanks to the
+custom user-defined network we created in Docker beforehand).
+
+```yaml
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: 'netdata'
+
+ metrics_path: /api/v1/allmetrics
+ params:
+ format: [ prometheus ]
+
+ static_configs:
+ - targets: ['netdata:19999']
+```
+
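+Before restarting Prometheus, you can optionally verify that the Netdata endpoint is reachable from inside the
+Prometheus container over the user-defined network (`curl` is already available in this container, since we used it
+for the download above):
+
+```sh
+curl -s 'http://netdata:19999/api/v1/allmetrics?format=prometheus' | head
+```
+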
+Let's start Prometheus once again by running `/opt/prometheus/prometheus`. If we now navigate to Prometheus at
+<http://localhost:9090/targets> we should see our target being successfully scraped. If we now go back to
+Prometheus's homepage and begin to type `netdata_`, Prometheus should auto-complete the metrics it is now scraping.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png)
+
+Let's now start exploring how we can graph some metrics. Back in our Netdata container, let's get the CPU spinning with a
+pointless busy loop. On the shell do the following:
+
+```sh
+[root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done
+```
+
+Our Netdata CPU graph should be showing some activity. Let's represent this in Prometheus. In order to do this let's
+keep our metrics page open for reference: <http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes>. We are
+setting out to graph the data in the CPU chart, so let's search for `system.cpu` in the metrics page above. We come
+across a section of metrics with the first comments `# COMMENT homogeneous chart "system.cpu", context "system.cpu",
+family "cpu", units "percentage"` followed by the metrics. This is a good start; now let us drill down to the specific
+metric we would like to graph.
+
+```conf
+# COMMENT
+netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1501275951 to 1501275951 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0000000 1501275951000
+```
+
+Here we learn that the metric name we care about is `netdata_system_cpu_percentage_average`, so throw this into
+Prometheus and see what we get. We should see something similar to this (I shut off my busy loop):
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.47.53%20PM.png)
+
+This is a good step toward what we want. Also make note that Prometheus will tag on an `instance` label for us which
+corresponds to our statically defined job in the configuration file. This allows us to tailor our queries to specific
+instances. Now we need to isolate the dimension we want in our query. To do this, let's refine the query slightly by
+querying the dimension as well. Place this into our query text box:
+`netdata_system_cpu_percentage_average{dimension="system"}`. We now wind up with the following graph.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.54.40%20PM.png)
+
+Awesome, this is exactly what we wanted. If you haven't caught on yet, we can emulate entire charts from Netdata by using
+the `chart` label. If you'd like, you can combine the `chart` and `instance` labels to create per-instance charts.
+Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
+
+This covers the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
+page](/exporting/prometheus/#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
+its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter like so:
+<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected>. If you choose to use
+this method you will need to use Prometheus's set of functions (<https://prometheus.io/docs/querying/functions/>) to
+obtain useful metrics, as you are now dealing with raw counters from the system. For example, you will have to use the
+`irate()` function over a counter to get that metric's rate per second, as shown in the sketch below. If your graphing
+needs are met by using the metrics returned by Netdata's internal database (not specifying any `source=` URL parameter),
+then use that. If you find limitations, consider re-writing your queries using the raw data and Prometheus functions to
+get the desired chart.
+
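+For example, a rate query over an as-collected counter could look roughly like the following (the metric and
+dimension names here are only an illustration; check the `allmetrics` page above for the names your node actually
+exposes):
+
+```conf
+irate(netdata_system_cpu_total{chart="system.cpu",dimension="user"}[5m])
+```
+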
+## Grafana
+
+Finally we make it to Grafana. This is the easiest part, in my opinion. This time we will actually run the official
+Grafana Docker container, as all the configuration we need to do is done via the GUI. Let's run the following command:
+
+```sh
+docker run -i -p 3000:3000 --network=netdata-tutorial grafana/grafana
+```
+
+This will get Grafana running at <http://localhost:3000/>. Let's go there and
+log in using the default credentials admin:admin.
+
+The first thing we want to do is click "Add data source". Let's make it look like the following screenshot:
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.36.55%20PM.png)
+
+With this completed, let's graph! Create a new dashboard by clicking on the top-left Grafana icon and create a new graph
+in that dashboard. Fill in the query like we did above and save.
+
+![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.39.38%20PM.png)
+
+## Conclusion
+
+There you have it, a complete systems monitoring stack which is very easy to deploy. From here I would begin to
+explore how Prometheus and a service discovery mechanism such as Consul can play together nicely (see the sketch below).
+My current prod deployments automatically register Netdata services into Consul, and Prometheus automatically begins to
+scrape them. Once this is achieved you do not have to think about the monitoring system until Prometheus cannot keep up
+with your scale. When that happens, there are options presented in the Prometheus documentation for solving it. Hope
+this was helpful, happy monitoring.
+
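+As a rough sketch of that pattern (the Consul address and service name below are assumptions, not part of this
+tutorial's setup), a Consul-based scrape job in `prometheus.yml` could look like this:
+
+```yaml
+scrape_configs:
+  - job_name: 'netdata-consul'
+    metrics_path: /api/v1/allmetrics
+    params:
+      format: [ prometheus ]
+    consul_sd_configs:
+      - server: 'consul.service.consul:8500'
+        services: ['netdata']
+```
+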
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2FWALKTHROUGH&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/aws_kinesis/Makefile.am b/exporting/aws_kinesis/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/exporting/aws_kinesis/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/aws_kinesis/README.md b/exporting/aws_kinesis/README.md
new file mode 100644
index 000000000..299fec581
--- /dev/null
+++ b/exporting/aws_kinesis/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Export metrics to AWS Kinesis Data Streams"
+description: "Archive your Agent's metrics to AWS Kinesis Data Streams for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/README.md
+sidebar_label: AWS Kinesis Data Streams
+-->
+
+# Export metrics to AWS Kinesis Data Streams
+
+## Prerequisites
+
+To use AWS Kinesis for metric collection and processing, you should first
+[install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) the AWS SDK for C++.
+`libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled. Next, Netdata
+should be re-installed from source. The installer will detect that the required libraries are now available.
+
+If the AWS SDK for C++ is being installed from source, it is useful to set `-DBUILD_ONLY=kinesis`. Otherwise, the
+build process could take a very long time. Note that the default installation path for the libraries is
+`/usr/local/lib64`. Many Linux distributions don't include this path in the default library search path, so it is
+advisable to use the following options to `cmake` while building the AWS SDK:
+
+```sh
+sudo cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis <aws-sdk-cpp sources>
+```
+
+The `-DCMAKE_INSTALL_PREFIX=/usr` option also ensures that
+[third party dependencies](https://github.com/aws/aws-sdk-cpp#third-party-dependencies) are installed in your system
+during the SDK build process.
+
+## Configuration
+
+To enable data sending to the Kinesis service, run `./edit-config exporting.conf` in the Netdata configuration directory
+and set the following options:
+
+```conf
+[kinesis:my_instance]
+ enabled = yes
+ destination = us-east-1
+```
+
+Set the `destination` option to an AWS region.
+
+Set AWS credentials and stream name:
+
+```conf
+ # AWS credentials
+ aws_access_key_id = your_access_key_id
+ aws_secret_access_key = your_secret_access_key
+ # destination stream
+ stream name = your_stream_name
+```
+
+Alternatively, you can set AWS credentials for the `netdata` user using AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
+
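+One of those standard methods is a shared credentials file in the home directory of the user the Agent runs as, for
+example (the values below are placeholders):
+
+```conf
+# ~/.aws/credentials
+[default]
+aws_access_key_id = your_access_key_id
+aws_secret_access_key = your_secret_access_key
+```
+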
+Netdata automatically computes a partition key for every record in order to distribute records evenly across the
+available shards.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Faws_kinesis%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/aws_kinesis/aws_kinesis.c b/exporting/aws_kinesis/aws_kinesis.c
new file mode 100644
index 000000000..036afb49f
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "aws_kinesis.h"
+
+/**
+ * Clean up an AWS Kinesis connector instance
+ *
+ * @param instance an instance data structure.
+ */
+void aws_kinesis_cleanup(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+ kinesis_shutdown(instance->connector_specific_data);
+
+ freez(instance->connector_specific_data);
+
+ struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ if (connector_specific_config) {
+ freez(connector_specific_config->auth_key_id);
+ freez(connector_specific_config->secure_key);
+ freez(connector_specific_config->stream_name);
+
+ freez(connector_specific_config);
+ }
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+}
+
+/**
+ * Initialize AWS Kinesis connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_aws_kinesis_instance(struct instance *instance)
+{
+ instance->worker = aws_kinesis_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = NULL;
+
+ instance->prepare_header = NULL;
+ instance->check_response = NULL;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for AWS Kinesis exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ if (!instance->engine->aws_sdk_initialized) {
+ aws_sdk_init();
+ instance->engine->aws_sdk_initialized = 1;
+ }
+
+ struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct aws_kinesis_specific_data *connector_specific_data = callocz(1, sizeof(struct aws_kinesis_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ if (!strcmp(connector_specific_config->stream_name, "")) {
+ error("stream name is a mandatory Kinesis parameter but it is not configured");
+ return 1;
+ }
+
+ kinesis_init(
+ (void *)connector_specific_data,
+ instance->config.destination,
+ connector_specific_config->auth_key_id,
+ connector_specific_config->secure_key,
+ instance->config.timeoutms);
+
+ return 0;
+}
+
+/**
+ * AWS Kinesis connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void aws_kinesis_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance *)instance_p;
+ struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct aws_kinesis_specific_data *connector_specific_data = instance->connector_specific_data;
+
+ while (!instance->engine->exit) {
+ unsigned long long partition_key_seq = 0;
+ struct stats *stats = &instance->stats;
+
+ uv_mutex_lock(&instance->mutex);
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // reset the monitoring chart counters
+ stats->received_bytes =
+ stats->sent_bytes =
+ stats->sent_metrics =
+ stats->lost_metrics =
+ stats->receptions =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->data_lost_events =
+ stats->lost_bytes =
+ stats->reconnects = 0;
+
+ BUFFER *buffer = (BUFFER *)instance->buffer;
+ size_t buffer_len = buffer_strlen(buffer);
+
+ stats->buffered_bytes = buffer_len;
+
+ size_t sent = 0;
+
+ while (sent < buffer_len) {
+ char partition_key[KINESIS_PARTITION_KEY_MAX + 1];
+ snprintf(partition_key, KINESIS_PARTITION_KEY_MAX, "netdata_%llu", partition_key_seq++);
+ size_t partition_key_len = strnlen(partition_key, KINESIS_PARTITION_KEY_MAX);
+
+ const char *first_char = buffer_tostring(buffer) + sent;
+
+ size_t record_len = 0;
+
+ // split buffer into chunks of maximum allowed size
+ if (buffer_len - sent < KINESIS_RECORD_MAX - partition_key_len) {
+ record_len = buffer_len - sent;
+ } else {
+ record_len = KINESIS_RECORD_MAX - partition_key_len;
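+                // back up to the last newline so a record never ends in the middle of a metric line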
+ while (record_len && *(first_char + record_len - 1) != '\n')
+ record_len--;
+ }
+ char error_message[ERROR_LINE_MAX + 1] = "";
+
+ debug(
+ D_BACKEND,
+ "EXPORTING: kinesis_put_record(): dest = %s, id = %s, key = %s, stream = %s, partition_key = %s, \
+ buffer = %zu, record = %zu",
+ instance->config.destination,
+ connector_specific_config->auth_key_id,
+ connector_specific_config->secure_key,
+ connector_specific_config->stream_name,
+ partition_key,
+ buffer_len,
+ record_len);
+
+ kinesis_put_record(
+ connector_specific_data, connector_specific_config->stream_name, partition_key, first_char, record_len);
+
+ sent += record_len;
+ stats->transmission_successes++;
+
+ size_t sent_bytes = 0, lost_bytes = 0;
+
+ if (unlikely(kinesis_get_result(
+ connector_specific_data->request_outcomes, error_message, &sent_bytes, &lost_bytes))) {
+ // oops! we couldn't send (all or some of the) data
+ error("EXPORTING: %s", error_message);
+ error(
+ "EXPORTING: failed to write data to database backend '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+ instance->config.destination, sent_bytes, sent_bytes - lost_bytes);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_bytes += lost_bytes;
+
+ // estimate the number of lost metrics
+ stats->lost_metrics += (collected_number)(
+ stats->buffered_metrics *
+ (buffer_len && (lost_bytes > buffer_len) ? (double)lost_bytes / buffer_len : 1));
+
+ break;
+ } else {
+ stats->receptions++;
+ }
+
+ if (unlikely(instance->engine->exit))
+ break;
+ }
+
+ stats->sent_bytes += sent;
+ if (likely(sent == buffer_len))
+ stats->sent_metrics = stats->buffered_metrics;
+
+ buffer_flush(buffer);
+
+ send_internal_metrics(instance);
+
+ stats->buffered_metrics = 0;
+
+ uv_mutex_unlock(&instance->mutex);
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+ aws_kinesis_cleanup(instance);
+}
diff --git a/exporting/aws_kinesis/aws_kinesis.h b/exporting/aws_kinesis/aws_kinesis.h
new file mode 100644
index 000000000..d88a45861
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_KINESIS_H
+#define NETDATA_EXPORTING_KINESIS_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/json/json.h"
+#include "aws_kinesis_put_record.h"
+
+#define KINESIS_PARTITION_KEY_MAX 256
+#define KINESIS_RECORD_MAX 1024 * 1024
+
+int init_aws_kinesis_instance(struct instance *instance);
+void aws_kinesis_connector_worker(void *instance_p);
+
+#endif //NETDATA_EXPORTING_KINESIS_H
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.cc b/exporting/aws_kinesis/aws_kinesis_put_record.cc
new file mode 100644
index 000000000..b20ec1373
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis_put_record.cc
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <aws/core/Aws.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/kinesis/KinesisClient.h>
+#include <aws/kinesis/model/PutRecordRequest.h>
+#include "aws_kinesis_put_record.h"
+
+using namespace Aws;
+
+static SDKOptions options;
+
+struct request_outcome {
+ Kinesis::Model::PutRecordOutcomeCallable future_outcome;
+ size_t data_len;
+};
+
+/**
+ * Initialize AWS SDK API
+ */
+void aws_sdk_init()
+{
+ InitAPI(options);
+}
+
+/**
+ * Shutdown AWS SDK API
+ */
+void aws_sdk_shutdown()
+{
+ ShutdownAPI(options);
+}
+
+/**
+ * Initialize a client and a data structure for request outcomes
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param region AWS region.
+ * @param access_key_id AWS account access key ID.
+ * @param secret_key AWS account secret access key.
+ * @param timeout communication timeout.
+ */
+void kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout)
+{
+ struct aws_kinesis_specific_data *kinesis_specific_data =
+ (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+
+ Client::ClientConfiguration config;
+
+ config.region = region;
+ config.requestTimeoutMs = timeout;
+ config.connectTimeoutMs = timeout;
+
+ Kinesis::KinesisClient *client;
+
+ if (access_key_id && *access_key_id && secret_key && *secret_key) {
+ client = New<Kinesis::KinesisClient>("client", Auth::AWSCredentials(access_key_id, secret_key), config);
+ } else {
+ client = New<Kinesis::KinesisClient>("client", config);
+ }
+ kinesis_specific_data->client = (void *)client;
+
+ Vector<request_outcome> *request_outcomes;
+
+ request_outcomes = new Vector<request_outcome>;
+ kinesis_specific_data->request_outcomes = (void *)request_outcomes;
+}
+
+/**
+ * Deallocate Kinesis specific data
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ */
+void kinesis_shutdown(void *kinesis_specific_data_p)
+{
+ struct aws_kinesis_specific_data *kinesis_specific_data =
+ (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+
+ Delete((Kinesis::KinesisClient *)kinesis_specific_data->client);
+ delete (Vector<request_outcome> *)kinesis_specific_data->request_outcomes;
+}
+
+/**
+ * Send data to the Kinesis service
+ *
+ * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param stream_name the name of a stream to send to.
+ * @param partition_key a partition key which automatically maps data to a specific stream.
+ * @param data a data buffer to send to the stream.
+ * @param data_len the length of the data buffer.
+ */
+void kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len)
+{
+ struct aws_kinesis_specific_data *kinesis_specific_data =
+ (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
+ Kinesis::Model::PutRecordRequest request;
+
+ request.SetStreamName(stream_name);
+ request.SetPartitionKey(partition_key);
+ request.SetData(Utils::ByteBuffer((unsigned char *)data, data_len));
+
+ ((Vector<request_outcome> *)(kinesis_specific_data->request_outcomes))->push_back(
+ { ((Kinesis::KinesisClient *)(kinesis_specific_data->client))->PutRecordCallable(request), data_len });
+}
+
+/**
+ * Get results from service responses
+ *
+ * @param request_outcomes_p request outcome information.
+ * @param error_message report an error message to the caller.
+ * @param sent_bytes report to the caller how many bytes were successfully sent.
+ * @param lost_bytes report to the caller how many bytes were lost during transmission.
+ * @return Returns 0 if all data was sent successfully, 1 when data was lost on transmission.
+ */
+int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes)
+{
+ Vector<request_outcome> *request_outcomes = (Vector<request_outcome> *)request_outcomes_p;
+ Kinesis::Model::PutRecordOutcome outcome;
+ *sent_bytes = 0;
+ *lost_bytes = 0;
+
+ for (auto request_outcome = request_outcomes->begin(); request_outcome != request_outcomes->end();) {
+ std::future_status status = request_outcome->future_outcome.wait_for(std::chrono::microseconds(100));
+
+ if (status == std::future_status::ready || status == std::future_status::deferred) {
+ outcome = request_outcome->future_outcome.get();
+ *sent_bytes += request_outcome->data_len;
+
+ if (!outcome.IsSuccess()) {
+ *lost_bytes += request_outcome->data_len;
+ outcome.GetError().GetMessage().copy(error_message, ERROR_LINE_MAX);
+ }
+
+            // erase() invalidates this iterator, so continue from the one it returns
+            request_outcome = request_outcomes->erase(request_outcome);
+ } else {
+ ++request_outcome;
+ }
+ }
+
+ if (*lost_bytes) {
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.h b/exporting/aws_kinesis/aws_kinesis_put_record.h
new file mode 100644
index 000000000..321baf669
--- /dev/null
+++ b/exporting/aws_kinesis/aws_kinesis_put_record.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
+#define NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
+
+#define ERROR_LINE_MAX 1023
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct aws_kinesis_specific_data {
+ void *client;
+ void *request_outcomes;
+};
+
+void aws_sdk_init();
+void aws_sdk_shutdown();
+
+void kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout);
+void kinesis_shutdown(void *client);
+
+void kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len);
+
+int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
diff --git a/exporting/check_filters.c b/exporting/check_filters.c
new file mode 100644
index 000000000..cfe0b4ce4
--- /dev/null
+++ b/exporting/check_filters.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Check if the connector instance should export the host metrics
+ *
+ * @param instance an exporting connector instance.
+ * @param host a data collecting host.
+ * @return Returns 1 if the connector instance should export the host metrics
+ */
+int rrdhost_is_exportable(struct instance *instance, RRDHOST *host)
+{
+ if (host->exporting_flags == NULL)
+ host->exporting_flags = callocz(instance->engine->instance_num, sizeof(size_t));
+
+ RRDHOST_FLAGS *flags = &host->exporting_flags[instance->index];
+
+ if (unlikely((*flags & (RRDHOST_FLAG_BACKEND_SEND | RRDHOST_FLAG_BACKEND_DONT_SEND)) == 0)) {
+ char *host_name = (host == localhost) ? "localhost" : host->hostname;
+
+ if (!instance->config.hosts_pattern || simple_pattern_matches(instance->config.hosts_pattern, host_name)) {
+ *flags |= RRDHOST_FLAG_BACKEND_SEND;
+ info("enabled exporting of host '%s' for instance '%s'", host_name, instance->config.name);
+ } else {
+ *flags |= RRDHOST_FLAG_BACKEND_DONT_SEND;
+ info("disabled exporting of host '%s' for instance '%s'", host_name, instance->config.name);
+ }
+ }
+
+ if (likely(*flags & RRDHOST_FLAG_BACKEND_SEND))
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * Check if the connector instance should export the chart
+ *
+ * @param instance an exporting connector instance.
+ * @param st a chart.
+ * @return Returns 1 if the connector instance should export the chart
+ */
+int rrdset_is_exportable(struct instance *instance, RRDSET *st)
+{
+ RRDHOST *host = st->rrdhost;
+
+ if (st->exporting_flags == NULL)
+ st->exporting_flags = callocz(instance->engine->instance_num, sizeof(size_t));
+
+ RRDSET_FLAGS *flags = &st->exporting_flags[instance->index];
+
+ if(unlikely(*flags & RRDSET_FLAG_BACKEND_IGNORE))
+ return 0;
+
+ if(unlikely(!(*flags & RRDSET_FLAG_BACKEND_SEND))) {
+ // we have not checked this chart
+ if(simple_pattern_matches(instance->config.charts_pattern, st->id) || simple_pattern_matches(instance->config.charts_pattern, st->name))
+ *flags |= RRDSET_FLAG_BACKEND_SEND;
+ else {
+ *flags |= RRDSET_FLAG_BACKEND_IGNORE;
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is disabled for backends.", st->id, host->hostname);
+ return 0;
+ }
+ }
+
+ if(unlikely(!rrdset_is_available_for_backends(st))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s', because it is not available for backends.", st->id, host->hostname);
+ return 0;
+ }
+
+ if(unlikely(st->rrd_memory_mode == RRD_MEMORY_MODE_NONE && !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
+ debug(D_BACKEND, "BACKEND: not sending chart '%s' of host '%s' because its memory mode is '%s' and the backend requires database access.", st->id, host->hostname, rrd_memory_mode_name(st->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/exporting/clean_connectors.c b/exporting/clean_connectors.c
new file mode 100644
index 000000000..890e8daac
--- /dev/null
+++ b/exporting/clean_connectors.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Clean the instance config.
+ *
+ * @param config an instance config structure.
+ */
+static void clean_instance_config(struct instance_config *config)
+{
+ if(!config)
+ return;
+
+ freez((void *)config->type_name);
+ freez((void *)config->name);
+ freez((void *)config->destination);
+ freez((void *)config->prefix);
+ freez((void *)config->hostname);
+
+ simple_pattern_free(config->charts_pattern);
+
+ simple_pattern_free(config->hosts_pattern);
+}
+
+/**
+ * Clean the allocated variables
+ *
+ * @param instance an instance data structure.
+ */
+void clean_instance(struct instance *instance)
+{
+ clean_instance_config(&instance->config);
+ buffer_free(instance->labels);
+
+ uv_cond_destroy(&instance->cond_var);
+ // uv_mutex_destroy(&instance->mutex);
+}
+
+/**
+ * Clean up a simple connector instance on Netdata exit
+ *
+ * @param instance an instance data structure.
+ */
+void simple_connector_cleanup(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+
+ buffer_free(instance->buffer);
+ buffer_free(simple_connector_data->buffer);
+ buffer_free(simple_connector_data->header);
+
+ struct simple_connector_buffer *next_buffer = simple_connector_data->first_buffer;
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct simple_connector_buffer *current_buffer = next_buffer;
+ next_buffer = next_buffer->next;
+
+ buffer_free(current_buffer->header);
+ buffer_free(current_buffer->buffer);
+ freez(current_buffer);
+ }
+
+#ifdef ENABLE_HTTPS
+ if (simple_connector_data->conn)
+ SSL_free(simple_connector_data->conn);
+#endif
+
+ freez(simple_connector_data);
+
+ struct simple_connector_config *simple_connector_config =
+ (struct simple_connector_config *)instance->config.connector_specific_config;
+ freez(simple_connector_config);
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+
+ return;
+}
diff --git a/exporting/exporting.conf b/exporting/exporting.conf
new file mode 100644
index 000000000..c2e902c05
--- /dev/null
+++ b/exporting/exporting.conf
@@ -0,0 +1,89 @@
+[exporting:global]
+ enabled = no
+ # send configured labels = yes
+ # send automatic labels = no
+ # update every = 10
+
+[prometheus:exporter]
+ # data source = average
+ # send names instead of ids = yes
+ # send configured labels = yes
+ # send automatic labels = no
+ # send charts matching = *
+ # send hosts matching = localhost *
+ # prefix = netdata
+
+# An example configuration for graphite, json, opentsdb exporting connectors
+# [graphite:my_graphite_instance]
+ # enabled = no
+ # destination = localhost
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [prometheus_remote_write:my_prometheus_remote_write_instance]
+ # enabled = no
+ # destination = localhost
+ # remote write URL path = /receive
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [kinesis:my_kinesis_instance]
+ # enabled = no
+ # destination = us-east-1
+ # stream name = netdata
+ # aws_access_key_id = my_access_key_id
+ # aws_secret_access_key = my_aws_secret_access_key
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [pubsub:my_pubsub_instance]
+ # enabled = no
+ # destination = pubsub.googleapis.com
+ # credentials file = /etc/netdata/pubsub_credentials.json
+ # project id = my_project
+ # topic id = my_topic
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+# [mongodb:my_mongodb_instance]
+ # enabled = no
+ # destination = localhost
+ # database = my_database
+ # collection = my_collection
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c
new file mode 100644
index 000000000..6a1320cd1
--- /dev/null
+++ b/exporting/exporting_engine.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+static struct engine *engine = NULL;
+
+/**
+ * Exporting Clean Engine
+ *
+ * Clean all variables allocated inside the static engine structure.
+ */
+static void exporting_clean_engine()
+{
+ if (!engine)
+ return;
+
+#if HAVE_KINESIS
+ if (engine->aws_sdk_initialized)
+ aws_sdk_shutdown();
+#endif
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (engine->protocol_buffers_initialized)
+ protocol_buffers_shutdown();
+#endif
+
+ //Cleanup web api
+ prometheus_clean_server_root();
+
+ for (struct instance *instance = engine->instance_root; instance;) {
+ struct instance *current_instance = instance;
+ instance = instance->next;
+
+ clean_instance(current_instance);
+ }
+
+ freez((void *)engine->config.hostname);
+ freez(engine);
+}
+
+/**
+ * Clean up the main exporting thread and all connector workers on Netdata exit
+ *
+ * @param ptr thread data.
+ */
+static void exporting_main_cleanup(void *ptr)
+{
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+
+ info("cleaning up...");
+
+ if (!engine) {
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return;
+ }
+
+ engine->exit = 1;
+
+ int found = 0;
+ usec_t max = 2 * USEC_PER_SEC, step = 50000;
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (!instance->exited) {
+ found++;
+ info("stopping worker for instance %s", instance->config.name);
+ uv_mutex_unlock(&instance->mutex);
+ instance->data_is_ready = 1;
+ uv_cond_signal(&instance->cond_var);
+ } else
+ info("found stopped worker for instance %s", instance->config.name);
+ }
+
+ while (found && max > 0) {
+ max -= step;
+ info("Waiting for %d exporting connectors to finish...", found);
+ sleep_usec(step);
+ found = 0;
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (!instance->exited)
+ found++;
+ }
+ }
+
+ exporting_clean_engine();
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+}
+
+/**
+ * Exporting engine main
+ *
+ * The main thread used to control the exporting engine.
+ *
+ * @param ptr a pointer to netdata_static_structure.
+ *
+ * @return It always returns NULL.
+ */
+void *exporting_main(void *ptr)
+{
+ netdata_thread_cleanup_push(exporting_main_cleanup, ptr);
+
+ engine = read_exporting_config();
+ if (!engine) {
+ info("EXPORTING: no exporting connectors configured");
+ goto cleanup;
+ }
+
+ if (init_connectors(engine) != 0) {
+ error("EXPORTING: cannot initialize exporting connectors");
+ send_statistics("EXPORTING_START", "FAIL", "-");
+ goto cleanup;
+ }
+
+ RRDSET *st_main_rusage = NULL;
+ RRDDIM *rd_main_user = NULL;
+ RRDDIM *rd_main_system = NULL;
+ create_main_rusage_chart(&st_main_rusage, &rd_main_user, &rd_main_system);
+
+ usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ while (!netdata_exit) {
+ heartbeat_next(&hb, step_ut);
+ engine->now = now_realtime_sec();
+
+ if (mark_scheduled_instances(engine))
+ prepare_buffers(engine);
+
+ send_main_rusage(st_main_rusage, rd_main_user, rd_main_system);
+
+#ifdef UNIT_TESTING
+ return NULL;
+#endif
+ }
+
+cleanup:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/exporting/exporting_engine.h b/exporting/exporting_engine.h
new file mode 100644
index 000000000..1d9feb7dd
--- /dev/null
+++ b/exporting/exporting_engine.h
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_ENGINE_H
+#define NETDATA_EXPORTING_ENGINE_H 1
+
+#include "daemon/common.h"
+
+#include <uv.h>
+
+#define exporter_get(section, name, value) expconfig_get(&exporting_config, section, name, value)
+#define exporter_get_number(section, name, value) expconfig_get_number(&exporting_config, section, name, value)
+#define exporter_get_boolean(section, name, value) expconfig_get_boolean(&exporting_config, section, name, value)
+
+extern struct config exporting_config;
+
+#define EXPORTING_UPDATE_EVERY_OPTION_NAME "update every"
+#define EXPORTING_UPDATE_EVERY_DEFAULT 10
+
+typedef enum exporting_options {
+ EXPORTING_OPTION_NON = 0,
+
+ EXPORTING_SOURCE_DATA_AS_COLLECTED = (1 << 0),
+ EXPORTING_SOURCE_DATA_AVERAGE = (1 << 1),
+ EXPORTING_SOURCE_DATA_SUM = (1 << 2),
+
+ EXPORTING_OPTION_SEND_CONFIGURED_LABELS = (1 << 3),
+ EXPORTING_OPTION_SEND_AUTOMATIC_LABELS = (1 << 4),
+ EXPORTING_OPTION_USE_TLS = (1 << 5),
+
+ EXPORTING_OPTION_SEND_NAMES = (1 << 16)
+} EXPORTING_OPTIONS;
+
+#define EXPORTING_OPTIONS_SOURCE_BITS \
+ (EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_SOURCE_DATA_SUM)
+#define EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) (exporting_options & EXPORTING_OPTIONS_SOURCE_BITS)
+
+#define sending_labels_configured(instance) \
+ (instance->config.options & (EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS))
+
+#define should_send_label(instance, label) \
+ ((instance->config.options & EXPORTING_OPTION_SEND_CONFIGURED_LABELS && \
+ label->label_source == LABEL_SOURCE_NETDATA_CONF) || \
+ (instance->config.options & EXPORTING_OPTION_SEND_AUTOMATIC_LABELS && \
+ label->label_source != LABEL_SOURCE_NETDATA_CONF))
+
+typedef enum exporting_connector_types {
+ EXPORTING_CONNECTOR_TYPE_UNKNOWN, // Invalid type
+ EXPORTING_CONNECTOR_TYPE_GRAPHITE, // Send plain text to Graphite
+ EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP, // Send data to Graphite using HTTP API
+ EXPORTING_CONNECTOR_TYPE_JSON, // Send data in JSON format
+ EXPORTING_CONNECTOR_TYPE_JSON_HTTP, // Send data in JSON format using HTTP API
+ EXPORTING_CONNECTOR_TYPE_OPENTSDB, // Send data to OpenTSDB using telnet API
+ EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP, // Send data to OpenTSDB using HTTP API
+ EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE, // Send data using the Prometheus remote write protocol
+ EXPORTING_CONNECTOR_TYPE_KINESIS, // Send message to AWS Kinesis
+ EXPORTING_CONNECTOR_TYPE_PUBSUB, // Send message to Google Cloud Pub/Sub
+ EXPORTING_CONNECTOR_TYPE_MONGODB, // Send data to MongoDB collection
+ EXPORTING_CONNECTOR_TYPE_NUM // Number of backend types
+} EXPORTING_CONNECTOR_TYPE;
+
+struct engine;
+
+struct instance_config {
+ EXPORTING_CONNECTOR_TYPE type;
+ const char *type_name;
+
+ const char *name;
+ const char *destination;
+ const char *prefix;
+ const char *hostname;
+
+ int update_every;
+ int buffer_on_failures;
+ long timeoutms;
+
+ EXPORTING_OPTIONS options;
+ SIMPLE_PATTERN *charts_pattern;
+ SIMPLE_PATTERN *hosts_pattern;
+
+ void *connector_specific_config;
+};
+
+struct simple_connector_config {
+ int default_port;
+};
+
+struct simple_connector_buffer {
+ BUFFER *header;
+ BUFFER *buffer;
+
+ size_t buffered_metrics;
+ size_t buffered_bytes;
+
+ int used;
+
+ struct simple_connector_buffer *next;
+};
+
+struct simple_connector_data {
+ void *connector_specific_data;
+
+ size_t total_buffered_metrics;
+
+ BUFFER *header;
+ BUFFER *buffer;
+ size_t buffered_metrics;
+ size_t buffered_bytes;
+
+ struct simple_connector_buffer *previous_buffer;
+ struct simple_connector_buffer *first_buffer;
+ struct simple_connector_buffer *last_buffer;
+
+#ifdef ENABLE_HTTPS
+ SSL *conn; //SSL connection
+ int flags; //The flags for SSL connection
+#endif
+};
+
+struct prometheus_remote_write_specific_config {
+ char *remote_write_path;
+};
+
+struct aws_kinesis_specific_config {
+ char *stream_name;
+ char *auth_key_id;
+ char *secure_key;
+};
+
+struct pubsub_specific_config {
+ char *credentials_file;
+ char *project_id;
+ char *topic_id;
+};
+
+struct mongodb_specific_config {
+ char *database;
+ char *collection;
+};
+
+struct engine_config {
+ const char *hostname;
+ int update_every;
+};
+
+struct stats {
+ collected_number buffered_metrics;
+ collected_number lost_metrics;
+ collected_number sent_metrics;
+ collected_number buffered_bytes;
+ collected_number lost_bytes;
+ collected_number sent_bytes;
+ collected_number received_bytes;
+ collected_number transmission_successes;
+ collected_number data_lost_events;
+ collected_number reconnects;
+ collected_number transmission_failures;
+ collected_number receptions;
+
+ int initialized;
+
+ RRDSET *st_metrics;
+ RRDDIM *rd_buffered_metrics;
+ RRDDIM *rd_lost_metrics;
+ RRDDIM *rd_sent_metrics;
+
+ RRDSET *st_bytes;
+ RRDDIM *rd_buffered_bytes;
+ RRDDIM *rd_lost_bytes;
+ RRDDIM *rd_sent_bytes;
+ RRDDIM *rd_received_bytes;
+
+ RRDSET *st_ops;
+ RRDDIM *rd_transmission_successes;
+ RRDDIM *rd_data_lost_events;
+ RRDDIM *rd_reconnects;
+ RRDDIM *rd_transmission_failures;
+ RRDDIM *rd_receptions;
+
+ RRDSET *st_rusage;
+ RRDDIM *rd_user;
+ RRDDIM *rd_system;
+};
+
+struct instance {
+ struct instance_config config;
+ void *buffer;
+ void (*worker)(void *instance_p);
+ struct stats stats;
+
+ int scheduled;
+ int disabled;
+ int skip_host;
+ int skip_chart;
+
+ BUFFER *labels;
+
+ time_t after;
+ time_t before;
+
+ uv_thread_t thread;
+ uv_mutex_t mutex;
+ uv_cond_t cond_var;
+ int data_is_ready;
+
+ int (*start_batch_formatting)(struct instance *instance);
+ int (*start_host_formatting)(struct instance *instance, RRDHOST *host);
+ int (*start_chart_formatting)(struct instance *instance, RRDSET *st);
+ int (*metric_formatting)(struct instance *instance, RRDDIM *rd);
+ int (*end_chart_formatting)(struct instance *instance, RRDSET *st);
+ int (*end_host_formatting)(struct instance *instance, RRDHOST *host);
+ int (*end_batch_formatting)(struct instance *instance);
+
+ void (*prepare_header)(struct instance *instance);
+ int (*check_response)(BUFFER *buffer, struct instance *instance);
+
+ void *connector_specific_data;
+
+ size_t index;
+ struct instance *next;
+ struct engine *engine;
+
+ volatile sig_atomic_t exited;
+};
+
+struct engine {
+ struct engine_config config;
+
+ size_t instance_num;
+ time_t now;
+
+ int aws_sdk_initialized;
+ int protocol_buffers_initialized;
+ int mongoc_initialized;
+
+ struct instance *instance_root;
+
+ volatile sig_atomic_t exit;
+};
+
+extern struct instance *prometheus_exporter_instance;
+
+void *exporting_main(void *ptr);
+
+struct engine *read_exporting_config();
+EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type);
+
+int init_connectors(struct engine *engine);
+void simple_connector_init(struct instance *instance);
+
+int mark_scheduled_instances(struct engine *engine);
+void prepare_buffers(struct engine *engine);
+
+size_t exporting_name_copy(char *dst, const char *src, size_t max_len);
+
+int rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
+int rrdset_is_exportable(struct instance *instance, RRDSET *st);
+
+calculated_number exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+
+void start_batch_formatting(struct engine *engine);
+void start_host_formatting(struct engine *engine, RRDHOST *host);
+void start_chart_formatting(struct engine *engine, RRDSET *st);
+void metric_formatting(struct engine *engine, RRDDIM *rd);
+void end_chart_formatting(struct engine *engine, RRDSET *st);
+void end_host_formatting(struct engine *engine, RRDHOST *host);
+void end_batch_formatting(struct engine *engine);
+int flush_host_labels(struct instance *instance, RRDHOST *host);
+int simple_connector_end_batch(struct instance *instance);
+
+int exporting_discard_response(BUFFER *buffer, struct instance *instance);
+void simple_connector_receive_response(int *sock, struct instance *instance);
+void simple_connector_send_buffer(
+ int *sock, int *failures, struct instance *instance, BUFFER *header, BUFFER *buffer, size_t buffered_metrics);
+void simple_connector_worker(void *instance_p);
+
+void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+void send_internal_metrics(struct instance *instance);
+
+extern void clean_instance(struct instance *ptr);
+void simple_connector_cleanup(struct instance *instance);
+
+static inline void disable_instance(struct instance *instance)
+{
+ instance->disabled = 1;
+ instance->scheduled = 0;
+ uv_mutex_unlock(&instance->mutex);
+ error("EXPORTING: Instance %s disabled", instance->config.name);
+}
+
+#include "exporting/prometheus/prometheus.h"
+#include "exporting/opentsdb/opentsdb.h"
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#include "exporting/prometheus/remote_write/remote_write.h"
+#endif
+
+#if HAVE_KINESIS
+#include "exporting/aws_kinesis/aws_kinesis.h"
+#endif
+
+#endif /* NETDATA_EXPORTING_ENGINE_H */
diff --git a/exporting/graphite/Makefile.am b/exporting/graphite/Makefile.am
new file mode 100644
index 000000000..babdcf0df
--- /dev/null
+++ b/exporting/graphite/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/graphite/README.md b/exporting/graphite/README.md
new file mode 100644
index 000000000..a6a25ef7a
--- /dev/null
+++ b/exporting/graphite/README.md
@@ -0,0 +1,30 @@
+<!--
+title: "Export metrics to Graphite providers"
+description: "Archive your Agent's metrics to any Graphite database provider for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/graphite/README.md
+sidebar_label: Graphite
+-->
+
+# Export metrics to Graphite providers
+
+You can use the Graphite connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to
+Graphite providers for long-term storage, further analysis, or correlation with data from other sources.
+
+## Configuration
+
+To enable data exporting to a Graphite database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[graphite:my_graphite_instance]
+ enabled = yes
+ destination = localhost:2003
+```
+
+Add the `:http` or `:https` modifier to the connector type if you need to use a protocol other than plaintext, for
+example `graphite:http:my_graphite_instance` or `graphite:https:my_graphite_instance`, as in the sketch below.
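+
+A minimal sketch of such an instance might look like the following; the instance name, hostname, and port are
+placeholders you would replace with the values of your own Graphite endpoint:
+
+```conf
+[graphite:https:my_graphite_instance]
+    enabled = yes
+    destination = graphite.example.com:443
+```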
+
+The Graphite connector is further configurable using additional settings. See the [exporting reference
+doc](/exporting/README.md#options) for details.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fgraphite%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c
new file mode 100644
index 000000000..9c09631f1
--- /dev/null
+++ b/exporting/graphite/graphite.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "graphite.h"
+
+/**
+ * Initialize Graphite connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_graphite_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 2003;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_graphite_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_graphite_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_graphite_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = simple_connector_end_batch;
+
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP)
+ instance->prepare_header = graphite_http_prepare_header;
+ else
+ instance->prepare_header = NULL;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for graphite exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Copy a label value and substitute underscores in place of characters which can't be used in Graphite output
+ *
+ * @param dst a destination string.
+ * @param src a source string.
+ * @param len the maximum number of characters copied.
+ */
+
+void sanitize_graphite_label_value(char *dst, char *src, size_t len)
+{
+ while (*src != '\0' && len) {
+ if (isspace(*src) || *src == ';' || *src == '~')
+ *dst++ = '_';
+ else
+ *dst++ = *src;
+ src++;
+ len--;
+ }
+ *dst = '\0';
+}
+
+/**
+ * Format host labels for the Graphite connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_graphite_plaintext(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char value[CONFIG_MAX_VALUE + 1];
+ sanitize_graphite_label_value(value, label->value, CONFIG_MAX_VALUE);
+
+ if (*value) {
+ buffer_strcat(instance->labels, ";");
+ buffer_sprintf(instance->labels, "%s=%s", label->key, value);
+ }
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for Graphite connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_graphite_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ buffer_sprintf(
+ instance->buffer,
+ "%s.%s.%s.%s%s%s%s " COLLECTED_NUMBER_FORMAT " %llu\n",
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ chart_name,
+ dimension_name,
+ (host->tags) ? ";" : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "",
+ rd->last_collected_value,
+ (unsigned long long)rd->last_collected_time.tv_sec);
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for Graphite connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_graphite_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ buffer_sprintf(
+ instance->buffer,
+ "%s.%s.%s.%s%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n",
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ chart_name,
+ dimension_name,
+ (host->tags) ? ";" : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "",
+ value,
+ (unsigned long long)last_t);
+
+ return 0;
+}
+
+/**
+ * Prepare an HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void graphite_http_prepare_header(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Content-Type: application/graphite\r\n"
+ "Content-Length: %lu\r\n"
+ "\r\n",
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+
+ return;
+}
diff --git a/exporting/graphite/graphite.h b/exporting/graphite/graphite.h
new file mode 100644
index 000000000..993c12e57
--- /dev/null
+++ b/exporting/graphite/graphite.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_GRAPHITE_H
+#define NETDATA_EXPORTING_GRAPHITE_H
+
+#include "exporting/exporting_engine.h"
+
+int init_graphite_instance(struct instance *instance);
+
+void sanitize_graphite_label_value(char *dst, char *src, size_t len);
+int format_host_labels_graphite_plaintext(struct instance *instance, RRDHOST *host);
+
+int format_dimension_collected_graphite_plaintext(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_graphite_plaintext(struct instance *instance, RRDDIM *rd);
+
+void graphite_http_prepare_header(struct instance *instance);
+
+#endif //NETDATA_EXPORTING_GRAPHITE_H
diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c
new file mode 100644
index 000000000..6aff26354
--- /dev/null
+++ b/exporting/init_connectors.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+#include "graphite/graphite.h"
+#include "json/json.h"
+#include "opentsdb/opentsdb.h"
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#include "prometheus/remote_write/remote_write.h"
+#endif
+
+#if HAVE_KINESIS
+#include "aws_kinesis/aws_kinesis.h"
+#endif
+
+#if ENABLE_EXPORTING_PUBSUB
+#include "pubsub/pubsub.h"
+#endif
+
+#if HAVE_MONGOC
+#include "mongodb/mongodb.h"
+#endif
+
+/**
+ * Initialize connectors
+ *
+ * @param engine an engine data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_connectors(struct engine *engine)
+{
+ engine->now = now_realtime_sec();
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ instance->index = engine->instance_num++;
+ instance->after = engine->now;
+
+ switch (instance->config.type) {
+ case EXPORTING_CONNECTOR_TYPE_GRAPHITE:
+ if (init_graphite_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP:
+ if (init_graphite_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_JSON:
+ if (init_json_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_JSON_HTTP:
+ if (init_json_http_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_OPENTSDB:
+ if (init_opentsdb_telnet_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP:
+ if (init_opentsdb_http_instance(instance) != 0)
+ return 1;
+ break;
+ case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (init_prometheus_remote_write_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ case EXPORTING_CONNECTOR_TYPE_KINESIS:
+#if HAVE_KINESIS
+ if (init_aws_kinesis_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ case EXPORTING_CONNECTOR_TYPE_PUBSUB:
+#if ENABLE_EXPORTING_PUBSUB
+ if (init_pubsub_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ case EXPORTING_CONNECTOR_TYPE_MONGODB:
+#if HAVE_MONGOC
+ if (init_mongodb_instance(instance) != 0)
+ return 1;
+#endif
+ break;
+ default:
+ error("EXPORTING: unknown exporting connector type");
+ return 1;
+ }
+
+ // dispatch the instance worker thread
+ int error = uv_thread_create(&instance->thread, instance->worker, instance);
+ if (error) {
+ error("EXPORTING: cannot create a worker thread. uv_thread_create(): %s", uv_strerror(error));
+ return 1;
+ }
+ char threadname[NETDATA_THREAD_NAME_MAX + 1];
+ snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "EXPORTING-%zu", instance->index);
+ uv_thread_set_name_np(instance->thread, threadname);
+
+ send_statistics("EXPORTING_START", "OK", instance->config.type_name);
+ }
+
+ return 0;
+}
+
+/**
+ * Initialize a ring buffer for a simple connector
+ *
+ * @param instance an instance data structure.
+ */
+void simple_connector_init(struct instance *instance)
+{
+ struct simple_connector_data *connector_specific_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+
+ if (connector_specific_data->first_buffer)
+ return;
+
+ connector_specific_data->header = buffer_create(0);
+ connector_specific_data->buffer = buffer_create(0);
+
+ // create a ring buffer
+ struct simple_connector_buffer *first_buffer = NULL;
+
+ if (instance->config.buffer_on_failures < 1)
+ instance->config.buffer_on_failures = 1;
+
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct simple_connector_buffer *current_buffer = callocz(1, sizeof(struct simple_connector_buffer));
+
+ if (!connector_specific_data->first_buffer)
+ first_buffer = current_buffer;
+ else
+ current_buffer->next = connector_specific_data->first_buffer;
+
+ connector_specific_data->first_buffer = current_buffer;
+ }
+
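+ // close the ring: the first allocated buffer becomes the tail and its next pointer wraps back to the head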
+ first_buffer->next = connector_specific_data->first_buffer;
+ connector_specific_data->last_buffer = connector_specific_data->first_buffer;
+
+ return;
+}
diff --git a/exporting/json/Makefile.am b/exporting/json/Makefile.am
new file mode 100644
index 000000000..babdcf0df
--- /dev/null
+++ b/exporting/json/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/json/README.md b/exporting/json/README.md
new file mode 100644
index 000000000..a0f8472a0
--- /dev/null
+++ b/exporting/json/README.md
@@ -0,0 +1,30 @@
+<!--
+title: "Export metrics to JSON document databases"
+description: "Archive your Agent's metrics to a JSON document database for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/json/README.md
+sidebar_label: JSON Document Databases
+-->
+
+# Export metrics to JSON document databases
+
+You can use the JSON connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to JSON
+document databases for long-term storage, further analysis, or correlation with data from other sources.
+
+## Configuration
+
+To enable data exporting to a JSON document database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[json:my_json_instance]
+ enabled = yes
+ destination = localhost:5448
+```
+
+Add the `:http` or `:https` modifier to the connector type if you need to use a protocol other than plaintext, for
+example `json:http:my_json_instance` or `json:https:my_json_instance`, as in the sketch below.
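+
+A minimal sketch of such an instance might look like the following; the instance name, hostname, and port are
+placeholders you would replace with the values of your own JSON document database endpoint:
+
+```conf
+[json:https:my_json_instance]
+    enabled = yes
+    destination = json.example.com:5448
+```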
+
+The JSON connector is further configurable using additional settings. See the [exporting reference
+doc](/exporting/README.md#options) for details.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fjson%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/json/json.c b/exporting/json/json.c
new file mode 100644
index 000000000..f2396bafa
--- /dev/null
+++ b/exporting/json/json.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "json.h"
+
+/**
+ * Initialize JSON connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_json_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 5448;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = simple_connector_end_batch;
+
+ instance->prepare_header = NULL;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for json exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Initialize JSON connector instance for HTTP protocol
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_json_http_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 5448;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ instance->start_batch_formatting = open_batch_json_http;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = close_batch_json_http;
+
+ instance->prepare_header = json_http_prepare_header;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Format host labels for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_json_plaintext(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ buffer_strcat(instance->labels, "\"labels\":{");
+
+ int count = 0;
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char value[CONFIG_MAX_VALUE * 2 + 1];
+ sanitize_json_string(value, label->value, CONFIG_MAX_VALUE);
+ if (count > 0)
+ buffer_strcat(instance->labels, ",");
+ buffer_sprintf(instance->labels, "\"%s\":\"%s\"", label->key, value);
+
+ count++;
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ buffer_strcat(instance->labels, "},");
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_json_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ const char *tags_pre = "", *tags_post = "", *tags = host->tags;
+ if (!tags)
+ tags = "";
+
+ if (*tags) {
+ if (*tags == '{' || *tags == '[' || *tags == '"') {
+ tags_pre = "\"host_tags\":";
+ tags_post = ",";
+ } else {
+ tags_pre = "\"host_tags\":\"";
+ tags_post = "\",";
+ }
+ }
+
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+ }
+
+ buffer_sprintf(
+ instance->buffer,
+
+ "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s%s%s"
+ "%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+ "\"chart_context\":\"%s\","
+ "\"chart_type\":\"%s\","
+ "\"units\":\"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" COLLECTED_NUMBER_FORMAT ","
+
+ "\"timestamp\":%llu}",
+
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ tags_pre,
+ tags,
+ tags_post,
+ instance->labels ? buffer_tostring(instance->labels) : "",
+
+ st->id,
+ st->name,
+ st->family,
+ st->context,
+ st->type,
+ st->units,
+
+ rd->id,
+ rd->name,
+ rd->last_collected_value,
+
+ (unsigned long long)rd->last_collected_time.tv_sec);
+
+ if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ buffer_strcat(instance->buffer, "\n");
+ }
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_json_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ const char *tags_pre = "", *tags_post = "", *tags = host->tags;
+ if (!tags)
+ tags = "";
+
+ if (*tags) {
+ if (*tags == '{' || *tags == '[' || *tags == '"') {
+ tags_pre = "\"host_tags\":";
+ tags_post = ",";
+ } else {
+ tags_pre = "\"host_tags\":\"";
+ tags_post = "\",";
+ }
+ }
+
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+ }
+
+ buffer_sprintf(
+ instance->buffer,
+ "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s%s%s"
+ "%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+ "\"chart_context\": \"%s\","
+ "\"chart_type\":\"%s\","
+ "\"units\": \"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" CALCULATED_NUMBER_FORMAT ","
+
+ "\"timestamp\": %llu}",
+
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ tags_pre,
+ tags,
+ tags_post,
+ instance->labels ? buffer_tostring(instance->labels) : "",
+
+ st->id,
+ st->name,
+ st->family,
+ st->context,
+ st->type,
+ st->units,
+
+ rd->id,
+ rd->name,
+ value,
+
+ (unsigned long long)last_t);
+
+ if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ buffer_strcat(instance->buffer, "\n");
+ }
+
+ return 0;
+}
+
+/**
+ * Open a JSON list for a batch
+ *
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int open_batch_json_http(struct instance *instance)
+{
+ buffer_strcat(instance->buffer, "[\n");
+
+ return 0;
+}
+
+/**
+ * Close a JSON list for a batch and update the buffered bytes counter
+ *
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int close_batch_json_http(struct instance *instance)
+{
+ buffer_strcat(instance->buffer, "\n]\n");
+
+ simple_connector_end_batch(instance);
+
+ return 0;
+}
+
+/**
+ * Prepare HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void json_http_prepare_header(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %lu\r\n"
+ "\r\n",
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+
+ return;
+}
diff --git a/exporting/json/json.h b/exporting/json/json.h
new file mode 100644
index 000000000..d916263a9
--- /dev/null
+++ b/exporting/json/json.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_JSON_H
+#define NETDATA_EXPORTING_JSON_H
+
+#include "exporting/exporting_engine.h"
+
+int init_json_instance(struct instance *instance);
+int init_json_http_instance(struct instance *instance);
+
+int format_host_labels_json_plaintext(struct instance *instance, RRDHOST *host);
+
+int format_dimension_collected_json_plaintext(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_json_plaintext(struct instance *instance, RRDDIM *rd);
+
+int open_batch_json_http(struct instance *instance);
+int close_batch_json_http(struct instance *instance);
+
+void json_http_prepare_header(struct instance *instance);
+
+#endif //NETDATA_EXPORTING_JSON_H
diff --git a/exporting/mongodb/Makefile.am b/exporting/mongodb/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/exporting/mongodb/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/mongodb/README.md b/exporting/mongodb/README.md
new file mode 100644
index 000000000..2934f38c5
--- /dev/null
+++ b/exporting/mongodb/README.md
@@ -0,0 +1,38 @@
+<!--
+title: "Export metrics to MongoDB"
+description: "Archive your Agent's metrics to a MongoDB database for long-term storage, further analysis, or correlation with data from other sources."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/mongodb/README.md
+sidebar_label: MongoDB
+-->
+
+# Export metrics to MongoDB
+
+You can use the MongoDB connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to a
+MongoDB database for long-term storage, further analysis, or correlation with data from other sources.
+
+## Prerequisites
+
+To use MongoDB as external storage for long-term archiving, first
+[install](http://mongoc.org/libmongoc/current/installing.html) `libmongoc` 1.7.0 or higher. Then reinstall Netdata
+from source so that the installer can detect the newly available library.
+
+## Configuration
+
+To enable data exporting to a MongoDB database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[mongodb:my_instance]
+ enabled = yes
+ destination = mongodb://<hostname>
+ database = your_database_name
+ collection = your_collection_name
+```
+
+You can find more information about the format of the `destination` connection string URI in the MongoDB
+[documentation](https://docs.mongodb.com/manual/reference/connection-string/).
+
+The default socket timeout depends on the exporting connector update interval. The timeout is 500 ms shorter than the
+interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
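+
+For example, a minimal sketch that overrides the socket timeout might look like this; the hostname, database,
+collection, and timeout value are placeholders you would adjust for your own deployment:
+
+```conf
+[mongodb:my_instance]
+    enabled = yes
+    destination = mongodb://localhost:27017/?sockettimeoutms=5000
+    database = your_database_name
+    collection = your_collection_name
+```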
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/mongodb/mongodb.c b/exporting/mongodb/mongodb.c
new file mode 100644
index 000000000..44922a242
--- /dev/null
+++ b/exporting/mongodb/mongodb.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define EXPORTING_INTERNALS
+#include "mongodb.h"
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+/**
+ * Initialize MongoDB connector specific data, including a ring buffer
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int mongodb_init(struct instance *instance)
+{
+ struct mongodb_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ mongoc_uri_t *uri;
+ bson_error_t bson_error;
+
+ if (unlikely(!connector_specific_config->collection || !*connector_specific_config->collection)) {
+ error("EXPORTING: collection name is a mandatory MongoDB parameter, but it is not configured");
+ return 1;
+ }
+
+ uri = mongoc_uri_new_with_error(instance->config.destination, &bson_error);
+ if (unlikely(!uri)) {
+ error(
+ "EXPORTING: failed to parse URI: %s. Error message: %s", instance->config.destination, bson_error.message);
+ return 1;
+ }
+
+ int32_t socket_timeout =
+ mongoc_uri_get_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, instance->config.timeoutms);
+ if (!mongoc_uri_set_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout)) {
+ error("EXPORTING: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout);
+ return 1;
+ };
+
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+
+ connector_specific_data->client = mongoc_client_new_from_uri(uri);
+ if (unlikely(!connector_specific_data->client)) {
+ error("EXPORTING: failed to create a new client");
+ return 1;
+ }
+
+ if (!mongoc_client_set_appname(connector_specific_data->client, "netdata")) {
+ error("EXPORTING: failed to set client appname");
+ };
+
+ connector_specific_data->collection = mongoc_client_get_collection(
+ connector_specific_data->client, connector_specific_config->database, connector_specific_config->collection);
+
+ mongoc_uri_destroy(uri);
+
+ // create a ring buffer
+ struct bson_buffer *first_buffer = NULL;
+
+ if (instance->config.buffer_on_failures < 2)
+ instance->config.buffer_on_failures = 1;
+ else
+ instance->config.buffer_on_failures -= 1;
+
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct bson_buffer *current_buffer = callocz(1, sizeof(struct bson_buffer));
+
+ if (!connector_specific_data->first_buffer)
+ first_buffer = current_buffer;
+ else
+ current_buffer->next = connector_specific_data->first_buffer;
+
+ connector_specific_data->first_buffer = current_buffer;
+ }
+
+ first_buffer->next = connector_specific_data->first_buffer;
+ connector_specific_data->last_buffer = connector_specific_data->first_buffer;
+
+ return 0;
+}
+
+/**
+ * Initialize a MongoDB connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_mongodb_instance(struct instance *instance)
+{
+ instance->worker = mongodb_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = format_batch_mongodb;
+
+ instance->prepare_header = NULL;
+ instance->check_response = NULL;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for MongoDB exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ struct mongodb_specific_data *connector_specific_data = callocz(1, sizeof(struct mongodb_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ instance->config.timeoutms =
+ (instance->config.update_every >= 2) ? (instance->config.update_every * MSEC_PER_SEC - 500) : 1000;
+
+ if (!instance->engine->mongoc_initialized) {
+ mongoc_init();
+ instance->engine->mongoc_initialized = 1;
+ }
+
+ if (unlikely(mongodb_init(instance))) {
+ error("EXPORTING: cannot initialize MongoDB exporting connector");
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Free an array of BSON structures
+ *
+ * @param insert an array of documents.
+ * @param documents_inserted the number of documents inserted.
+ */
+void free_bson(bson_t **insert, size_t documents_inserted)
+{
+ size_t i;
+
+ for (i = 0; i < documents_inserted; i++)
+ bson_destroy(insert[i]);
+
+ freez(insert);
+}
+
+/**
+ * Format a batch for the MongoDB connector
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int format_batch_mongodb(struct instance *instance)
+{
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+ struct stats *stats = &instance->stats;
+
+ bson_t **insert = connector_specific_data->last_buffer->insert;
+ if (insert) {
+ // ring buffer is full, reuse the oldest element
+ connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
+ free_bson(insert, connector_specific_data->last_buffer->documents_inserted);
+ connector_specific_data->total_documents_inserted -= connector_specific_data->last_buffer->documents_inserted;
+ stats->buffered_bytes -= connector_specific_data->last_buffer->buffered_bytes;
+ }
+ insert = callocz((size_t)stats->buffered_metrics, sizeof(bson_t *));
+ connector_specific_data->last_buffer->insert = insert;
+
+ BUFFER *buffer = (BUFFER *)instance->buffer;
+ char *start = (char *)buffer_tostring(buffer);
+ char *end = start;
+
+ size_t documents_inserted = 0;
+
+ while (*end && documents_inserted <= (size_t)stats->buffered_metrics) {
+ while (*end && *end != '\n')
+ end++;
+
+ if (likely(*end)) {
+ *end = '\0';
+ end++;
+ } else {
+ break;
+ }
+
+ bson_error_t bson_error;
+ insert[documents_inserted] = bson_new_from_json((const uint8_t *)start, -1, &bson_error);
+
+ if (unlikely(!insert[documents_inserted])) {
+ error(
+ "EXPORTING: Failed creating a BSON document from a JSON string \"%s\" : %s", start, bson_error.message);
+ free_bson(insert, documents_inserted);
+ return 1;
+ }
+
+ start = end;
+
+ documents_inserted++;
+ }
+
+ stats->buffered_bytes += connector_specific_data->last_buffer->buffered_bytes = buffer_strlen(buffer);
+
+ buffer_flush(buffer);
+
+ // The stats->buffered_metrics is used in the MongoDB batch formatting as a variable for the number
+ // of metrics, added in the current iteration, so we are clearing it here. We will use the
+ // connector_specific_data->total_documents_inserted in the worker to show the statistics.
+ stats->buffered_metrics = 0;
+ connector_specific_data->total_documents_inserted += documents_inserted;
+
+ connector_specific_data->last_buffer->documents_inserted = documents_inserted;
+ connector_specific_data->last_buffer = connector_specific_data->last_buffer->next;
+
+ return 0;
+}
+
+/**
+ * Clean a MongoDB connector instance up
+ *
+ * @param instance an instance data structure.
+ */
+void mongodb_cleanup(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+
+ mongoc_collection_destroy(connector_specific_data->collection);
+ mongoc_client_destroy(connector_specific_data->client);
+ if (instance->engine->mongoc_initialized) {
+ mongoc_cleanup();
+ instance->engine->mongoc_initialized = 0;
+ }
+
+ buffer_free(instance->buffer);
+
+ struct bson_buffer *next_buffer = connector_specific_data->first_buffer;
+ for (int i = 0; i < instance->config.buffer_on_failures; i++) {
+ struct bson_buffer *current_buffer = next_buffer;
+ next_buffer = next_buffer->next;
+
+ if (current_buffer->insert)
+ free_bson(current_buffer->insert, current_buffer->documents_inserted);
+ freez(current_buffer);
+ }
+
+ freez(connector_specific_data);
+
+ struct mongodb_specific_config *connector_specific_config =
+ (struct mongodb_specific_config *)instance->config.connector_specific_config;
+ freez(connector_specific_config->database);
+ freez(connector_specific_config->collection);
+ freez(connector_specific_config);
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+
+ return;
+}
+
+/**
+ * MongoDB connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void mongodb_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance *)instance_p;
+ struct mongodb_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
+
+ while (!instance->engine->exit) {
+ struct stats *stats = &instance->stats;
+
+ uv_mutex_lock(&instance->mutex);
+ if (!connector_specific_data->first_buffer->insert ||
+ !connector_specific_data->first_buffer->documents_inserted) {
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+ }
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // reset the monitoring chart counters
+ stats->received_bytes =
+ stats->sent_bytes =
+ stats->sent_metrics =
+ stats->lost_metrics =
+ stats->receptions =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->data_lost_events =
+ stats->lost_bytes =
+ stats->reconnects = 0;
+
+ bson_t **insert = connector_specific_data->first_buffer->insert;
+ size_t documents_inserted = connector_specific_data->first_buffer->documents_inserted;
+ size_t buffered_bytes = connector_specific_data->first_buffer->buffered_bytes;
+
+ connector_specific_data->first_buffer->insert = NULL;
+ connector_specific_data->first_buffer->documents_inserted = 0;
+ connector_specific_data->first_buffer->buffered_bytes = 0;
+ connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
+
+ uv_mutex_unlock(&instance->mutex);
+
+ size_t data_size = 0;
+ for (size_t i = 0; i < documents_inserted; i++) {
+ data_size += insert[i]->len;
+ }
+
+ debug(
+ D_BACKEND,
+ "EXPORTING: mongodb_insert(): destination = %s, database = %s, collection = %s, data size = %zu",
+ instance->config.destination,
+ connector_specific_config->database,
+ connector_specific_config->collection,
+ data_size);
+
+ if (likely(documents_inserted != 0)) {
+ bson_error_t bson_error;
+ if (likely(mongoc_collection_insert_many(
+ connector_specific_data->collection,
+ (const bson_t **)insert,
+ documents_inserted,
+ NULL,
+ NULL,
+ &bson_error))) {
+ stats->sent_metrics = documents_inserted;
+ stats->sent_bytes += data_size;
+ stats->transmission_successes++;
+ stats->receptions++;
+ } else {
+ // oops! we couldn't send (all or some of the) data
+ error("EXPORTING: %s", bson_error.message);
+ error(
+ "EXPORTING: failed to write data to the database '%s'. "
+ "Willing to write %zu bytes, wrote %zu bytes.",
+ instance->config.destination, data_size, 0UL);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_bytes += buffered_bytes;
+ stats->lost_metrics += documents_inserted;
+ }
+ }
+
+ free_bson(insert, documents_inserted);
+
+ if (unlikely(instance->engine->exit))
+ break;
+
+ uv_mutex_lock(&instance->mutex);
+
+ stats->buffered_metrics = connector_specific_data->total_documents_inserted;
+
+ send_internal_metrics(instance);
+
+ connector_specific_data->total_documents_inserted -= documents_inserted;
+
+ stats->buffered_metrics = 0;
+ stats->buffered_bytes -= buffered_bytes;
+
+ uv_mutex_unlock(&instance->mutex);
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+ mongodb_cleanup(instance);
+}
diff --git a/exporting/mongodb/mongodb.h b/exporting/mongodb/mongodb.h
new file mode 100644
index 000000000..f1867b288
--- /dev/null
+++ b/exporting/mongodb/mongodb.h
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_MONGODB_H
+#define NETDATA_EXPORTING_MONGODB_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/json/json.h"
+#include <mongoc.h>
+
+struct bson_buffer {
+ bson_t **insert;
+ size_t documents_inserted;
+ size_t buffered_bytes;
+
+ struct bson_buffer *next;
+};
+
+struct mongodb_specific_data {
+ mongoc_client_t *client;
+ mongoc_collection_t *collection;
+
+ size_t total_documents_inserted;
+
+ struct bson_buffer *first_buffer;
+ struct bson_buffer *last_buffer;
+};
+
+int mongodb_init(struct instance *instance);
+void mongodb_cleanup(struct instance *instance);
+
+int init_mongodb_instance(struct instance *instance);
+int format_batch_mongodb(struct instance *instance);
+void mongodb_connector_worker(void *instance_p);
+
+#endif //NETDATA_EXPORTING_MONGODB_H
diff --git a/exporting/nc-exporting.sh b/exporting/nc-exporting.sh
new file mode 100755
index 000000000..9c8b08e60
--- /dev/null
+++ b/exporting/nc-exporting.sh
@@ -0,0 +1,158 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This is a simple backend database proxy, written in BASH, using the nc command.
+# Run the script without any parameters for help.
+
+MODE="${1}"
+MY_PORT="${2}"
+BACKEND_HOST="${3}"
+BACKEND_PORT="${4}"
+FILE="${NETDATA_NC_BACKEND_DIR-/tmp}/netdata-nc-backend-${MY_PORT}"
+
+log() {
+ logger --stderr --id=$$ --tag "netdata-nc-backend" "${*}"
+}
+
+mync() {
+ local ret
+
+ log "Running: nc ${*}"
+ nc "${@}"
+ ret=$?
+
+ log "nc stopped with return code ${ret}."
+
+ return ${ret}
+}
+
+listen_save_replay_forever() {
+ local file="${1}" port="${2}" real_backend_host="${3}" real_backend_port="${4}" ret delay=1 started ended
+
+ while true
+ do
+ log "Starting nc to listen on port ${port} and save metrics to ${file}"
+
+ started=$(date +%s)
+ mync -l -p "${port}" | tee -a -p --output-error=exit "${file}"
+ ended=$(date +%s)
+
+ if [ -s "${file}" ]
+ then
+ if [ -n "${real_backend_host}" ] && [ -n "${real_backend_port}" ]
+ then
+ log "Attempting to send the metrics to the real backend at ${real_backend_host}:${real_backend_port}"
+
+ mync "${real_backend_host}" "${real_backend_port}" <"${file}"
+ ret=$?
+
+ if [ ${ret} -eq 0 ]
+ then
+ log "Successfuly sent the metrics to ${real_backend_host}:${real_backend_port}"
+ mv "${file}" "${file}.old"
+ touch "${file}"
+ else
+ log "Failed to send the metrics to ${real_backend_host}:${real_backend_port} (nc returned ${ret}) - appending more data to ${file}"
+ fi
+ else
+ log "No backend configured - appending more data to ${file}"
+ fi
+ fi
+
+ # prevent a CPU hungry infinite loop
+ # if nc cannot listen to port
+ if [ $((ended - started)) -lt 5 ]
+ then
+ log "nc has been stopped too fast."
+ delay=30
+ else
+ delay=1
+ fi
+
+ log "Waiting ${delay} seconds before listening again for data."
+ sleep ${delay}
+ done
+}
+
+if [ "${MODE}" = "start" ]
+ then
+
+ # start the listener, in exclusive mode
+ # only one can use the same file/port at a time
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ log "Cannot get exclusive lock on file ${FILE}.lock - Am I running multiple times?"
+ exit 2
+ fi
+
+ # save our PID to the lock file
+ echo "$$" >"${FILE}.lock"
+
+ listen_save_replay_forever "${FILE}" "${MY_PORT}" "${BACKEND_HOST}" "${BACKEND_PORT}"
+ ret=$?
+
+ log "listener exited."
+ exit ${ret}
+
+ } 9>>"${FILE}.lock"
+
+ # we can only get here if ${FILE}.lock cannot be created
+ log "Cannot create file ${FILE}."
+ exit 3
+
+elif [ "${MODE}" = "stop" ]
+ then
+
+ {
+ flock -n 9
+ # shellcheck disable=SC2181
+ if [ $? -ne 0 ]
+ then
+ pid=$(<"${FILE}".lock)
+ log "Killing process ${pid}..."
+ kill -TERM "-${pid}"
+ exit 0
+ fi
+
+ log "File ${FILE}.lock has been locked by me but it shouldn't. Is a collector running?"
+ exit 4
+
+ } 9<"${FILE}.lock"
+
+ log "File ${FILE}.lock does not exist. Is a collector running?"
+ exit 5
+
+else
+
+ cat <<EOF
+Usage:
+
+ "${0}" start|stop PORT [BACKEND_HOST BACKEND_PORT]
+
+ PORT The port this script will listen on
+ (configure netdata to use this as a second backend)
+
+ BACKEND_HOST The real backend host
+ BACKEND_PORT The real backend port
+
+ This script can act as a fallback backend for netdata.
+ It will receive metrics from netdata, save them to
+ ${FILE}
+ and, once netdata reconnects to the real backend, this script
+ will push all collected metrics to the real backend too and
+ wait for the next failure.
+
+ Only one netdata can connect to this script at a time.
+ If you need a fallback for multiple netdata agents, run this
+ script multiple times with different ports.
+
+ You can run me in the background with this:
+
+ screen -d -m "${0}" start PORT [BACKEND_HOST BACKEND_PORT]
+EOF
+ exit 1
+fi
diff --git a/exporting/opentsdb/Makefile.am b/exporting/opentsdb/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/exporting/opentsdb/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/opentsdb/README.md b/exporting/opentsdb/README.md
new file mode 100644
index 000000000..3765ad271
--- /dev/null
+++ b/exporting/opentsdb/README.md
@@ -0,0 +1,30 @@
+<!--
+title: "Export metrics to OpenTSDB"
+description: "Archive your Agent's metrics to an OpenTSDB database for long-term storage and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/opentsdb/README.md
+sidebar_label: OpenTSDB
+-->
+
+# Export metrics to OpenTSDB
+
+You can use the OpenTSDB connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to OpenTSDB
+databases for long-term storage, further analysis, or correlation with data from other sources.
+
+## Configuration
+
+To enable data exporting to an OpenTSDB database, run `./edit-config exporting.conf` in the Netdata configuration
+directory and set the following options:
+
+```conf
+[opentsdb:my_opentsdb_instance]
+ enabled = yes
+ destination = localhost:4242
+```
+
+Add the `:http` or `:https` modifier to the connector type if you need to use a protocol other than plaintext. For example: `opentsdb:http:my_opentsdb_instance`,
+`opentsdb:https:my_opentsdb_instance`.
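+
+For instance, a hypothetical section using the HTTPS modifier (the instance name and destination below are illustrative) would look like this:
+
+```conf
+[opentsdb:https:my_opentsdb_instance]
+ enabled = yes
+ destination = localhost:4242
+```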
+
+The OpenTSDB connector is further configurable using additional settings. See the [exporting reference
+doc](/exporting/README.md#options) for details.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fopentsdb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c
new file mode 100644
index 000000000..d7b843dff
--- /dev/null
+++ b/exporting/opentsdb/opentsdb.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "opentsdb.h"
+#include "../json/json.h"
+
+/**
+ * Initialize OpenTSDB telnet connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_opentsdb_telnet_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 4242;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_opentsdb_telnet;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_opentsdb_telnet;
+ else
+ instance->metric_formatting = format_dimension_stored_opentsdb_telnet;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = simple_connector_end_batch;
+
+ instance->prepare_header = NULL;
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for opentsdb telnet exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Initialize OpenTSDB HTTP connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_opentsdb_http_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 4242;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+#ifdef ENABLE_HTTPS
+ connector_specific_data->flags = NETDATA_SSL_START;
+ connector_specific_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+ instance->connector_specific_data = connector_specific_data;
+
+ instance->start_batch_formatting = open_batch_json_http;
+ instance->start_host_formatting = format_host_labels_opentsdb_http;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_opentsdb_http;
+ else
+ instance->metric_formatting = format_dimension_stored_opentsdb_http;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = close_batch_json_http;
+
+ instance->prepare_header = opentsdb_http_prepare_header;
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for opentsdb HTTP exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Copy a label value and substitute underscores in place of characters which can't be used in OpenTSDB output
+ *
+ * @param dst a destination string.
+ * @param src a source string.
+ * @param len the maximum number of characters copied.
+ */
+
+void sanitize_opentsdb_label_value(char *dst, char *src, size_t len)
+{
+ while (*src != '\0' && len) {
+ if (isalpha(*src) || isdigit(*src) || *src == '-' || *src == '_' || *src == '.' || *src == '/' || IS_UTF8_BYTE(*src))
+ *dst++ = *src;
+ else
+ *dst++ = '_';
+ src++;
+ len--;
+ }
+ *dst = '\0';
+}
+
+/**
+ * Format host labels for OpenTSDB telnet connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_opentsdb_telnet(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char value[CONFIG_MAX_VALUE + 1];
+ sanitize_opentsdb_label_value(value, label->value, CONFIG_MAX_VALUE);
+
+ if (*value)
+ buffer_sprintf(instance->labels, " %s=%s", label->key, value);
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for OpenTSDB telnet connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_opentsdb_telnet(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ buffer_sprintf(
+ instance->buffer,
+ "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s%s%s\n",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)rd->last_collected_time.tv_sec,
+ rd->last_collected_value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for OpenTSDB telnet connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_opentsdb_telnet(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ buffer_sprintf(
+ instance->buffer,
+ "put %s.%s.%s %llu " CALCULATED_NUMBER_FORMAT " host=%s%s%s%s\n",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)last_t,
+ value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ (instance->labels) ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
+
+/**
+ * Prepare an HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void opentsdb_http_prepare_header(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %lu\r\n"
+ "\r\n",
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+
+ return;
+}
+
+/**
+ * Format host labels for OpenTSDB HTTP connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_labels_opentsdb_http(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char escaped_value[CONFIG_MAX_VALUE * 2 + 1];
+ sanitize_json_string(escaped_value, label->value, CONFIG_MAX_VALUE);
+
+ char value[CONFIG_MAX_VALUE + 1];
+ sanitize_opentsdb_label_value(value, escaped_value, CONFIG_MAX_VALUE);
+
+ if (*value) {
+ buffer_strcat(instance->labels, ",");
+ buffer_sprintf(instance->labels, "\"%s\":\"%s\"", label->key, value);
+ }
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for OpenTSDB HTTP connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+
+ buffer_sprintf(
+ instance->buffer,
+ "{"
+ "\"metric\":\"%s.%s.%s\","
+ "\"timestamp\":%llu,"
+ "\"value\":"COLLECTED_NUMBER_FORMAT","
+ "\"tags\":{"
+ "\"host\":\"%s%s%s\"%s"
+ "}"
+ "}",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)rd->last_collected_time.tv_sec,
+ rd->last_collected_value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ instance->labels ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for OpenTSDB HTTP connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ char chart_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ chart_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ RRD_ID_LENGTH_MAX);
+
+ char dimension_name[RRD_ID_LENGTH_MAX + 1];
+ exporting_name_copy(
+ dimension_name,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ RRD_ID_LENGTH_MAX);
+
+ time_t last_t;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if(isnan(value))
+ return 0;
+
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+
+ buffer_sprintf(
+ instance->buffer,
+ "{"
+ "\"metric\":\"%s.%s.%s\","
+ "\"timestamp\":%llu,"
+ "\"value\":"CALCULATED_NUMBER_FORMAT","
+ "\"tags\":{"
+ "\"host\":\"%s%s%s\"%s"
+ "}"
+ "}",
+ instance->config.prefix,
+ chart_name,
+ dimension_name,
+ (unsigned long long)last_t,
+ value,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (host->tags) ? " " : "",
+ (host->tags) ? host->tags : "",
+ instance->labels ? buffer_tostring(instance->labels) : "");
+
+ return 0;
+}
diff --git a/exporting/opentsdb/opentsdb.h b/exporting/opentsdb/opentsdb.h
new file mode 100644
index 000000000..d53a5054f
--- /dev/null
+++ b/exporting/opentsdb/opentsdb.h
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_OPENTSDB_H
+#define NETDATA_EXPORTING_OPENTSDB_H
+
+#include "exporting/exporting_engine.h"
+
+int init_opentsdb_telnet_instance(struct instance *instance);
+int init_opentsdb_http_instance(struct instance *instance);
+
+void sanitize_opentsdb_label_value(char *dst, char *src, size_t len);
+int format_host_labels_opentsdb_telnet(struct instance *instance, RRDHOST *host);
+int format_host_labels_opentsdb_http(struct instance *instance, RRDHOST *host);
+
+int format_dimension_collected_opentsdb_telnet(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_opentsdb_telnet(struct instance *instance, RRDDIM *rd);
+
+int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *rd);
+int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd);
+
+int open_batch_opentsdb_http(struct instance *instance);
+int close_batch_opentsdb_http(struct instance *instance);
+
+void opentsdb_http_prepare_header(struct instance *instance);
+
+#endif //NETDATA_EXPORTING_OPENTSDB_H
diff --git a/exporting/process_data.c b/exporting/process_data.c
new file mode 100644
index 000000000..5e11b3948
--- /dev/null
+++ b/exporting/process_data.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Normalize chart and dimension names
+ *
+ * Substitute '_' for any special character except '.'.
+ *
+ * @param dst where to copy name to.
+ * @param src where to copy name from.
+ * @param max_len the maximum size of copied name.
+ * @return Returns the size of the copied name.
+ */
+size_t exporting_name_copy(char *dst, const char *src, size_t max_len)
+{
+ size_t n;
+
+ for (n = 0; *src && n < max_len; dst++, src++, n++) {
+ char c = *src;
+
+ if (c != '.' && !isalnum(c))
+ *dst = '_';
+ else
+ *dst = c;
+ }
+ *dst = '\0';
+
+ return n;
+}
+
+/**
+ * Mark scheduled instances
+ *
+ * Any instance can have its own update interval. On every exporting engine update, only the instances
+ * which are scheduled for an update are picked.
+ *
+ * @param engine an engine data structure.
+ * @return Returns 1 if there are instances to process
+ */
+int mark_scheduled_instances(struct engine *engine)
+{
+ int instances_were_scheduled = 0;
+
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
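+ // an instance is scheduled when the engine clock enters the last data collection step
+ // of the instance's update interval, so each instance is picked once per update_every period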
+ if (!instance->disabled && (engine->now % instance->config.update_every >=
+ instance->config.update_every - localhost->rrd_update_every)) {
+ instance->scheduled = 1;
+ instances_were_scheduled = 1;
+ instance->before = engine->now;
+ }
+ }
+
+ return instances_were_scheduled;
+}
+
+/**
+ * Calculate the SUM or AVERAGE of a dimension, for any timeframe
+ *
+ * May return NAN if the database does not have any value in the given timeframe.
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension(metric) in the Netdata database.
+ * @param last_timestamp the timestamp that should be reported to the exporting connector instance.
+ * @return Returns the value, calculated over the given period.
+ */
+calculated_number exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+ time_t after = instance->after;
+ time_t before = instance->before;
+
+ // find the edges of the rrd database for this chart
+ time_t first_t = rd->state->query_ops.oldest_time(rd);
+ time_t last_t = rd->state->query_ops.latest_time(rd);
+ time_t update_every = st->update_every;
+ struct rrddim_query_handle handle;
+ storage_number n;
+
+ // step back a little, to make sure we have complete data collection
+ // for all metrics
+ after -= update_every * 2;
+ before -= update_every * 2;
+
+ // align the time-frame
+ after = after - (after % update_every);
+ before = before - (before % update_every);
+
+ // for before, lose another iteration
+ // the latest point will be reported the next time
+ before -= update_every;
+
+ if (unlikely(after > before))
+ // this can happen when update_every > before - after
+ after = before;
+
+ if (unlikely(after < first_t))
+ after = first_t;
+
+ if (unlikely(before > last_t))
+ before = last_t;
+
+ if (unlikely(before < first_t || after > last_t)) {
+ // the chart has not been updated in the wanted timeframe
+ debug(
+ D_BACKEND,
+ "EXPORTING: %s.%s.%s: aligned timeframe %lu to %lu is outside the chart's database range %lu to %lu",
+ host->hostname,
+ st->id,
+ rd->id,
+ (unsigned long)after,
+ (unsigned long)before,
+ (unsigned long)first_t,
+ (unsigned long)last_t);
+ return NAN;
+ }
+
+ *last_timestamp = before;
+
+ size_t counter = 0;
+ calculated_number sum = 0;
+
+ for (rd->state->query_ops.init(rd, &handle, after, before); !rd->state->query_ops.is_finished(&handle);) {
+ time_t curr_t;
+ n = rd->state->query_ops.next_metric(&handle, &curr_t);
+
+ if (unlikely(!does_storage_number_exist(n))) {
+ // not collected
+ continue;
+ }
+
+ calculated_number value = unpack_storage_number(n);
+ sum += value;
+
+ counter++;
+ }
+ rd->state->query_ops.finalize(&handle);
+ if (unlikely(!counter)) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: %s.%s.%s: no values stored in database for range %lu to %lu",
+ host->hostname,
+ st->id,
+ rd->id,
+ (unsigned long)after,
+ (unsigned long)before);
+ return NAN;
+ }
+
+ if (unlikely(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_SUM))
+ return sum;
+
+ return sum / (calculated_number)counter;
+}
+
+/**
+ * Start batch formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ */
+void start_batch_formatting(struct engine *engine)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled) {
+ uv_mutex_lock(&instance->mutex);
+ if (instance->start_batch_formatting && instance->start_batch_formatting(instance) != 0) {
+ error("EXPORTING: cannot start batch formatting for %s", instance->config.name);
+ disable_instance(instance);
+ }
+ }
+ }
+}
+
+/**
+ * Start host formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param host a data collecting host.
+ */
+void start_host_formatting(struct engine *engine, RRDHOST *host)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled) {
+ if (rrdhost_is_exportable(instance, host)) {
+ if (instance->start_host_formatting && instance->start_host_formatting(instance, host) != 0) {
+ error("EXPORTING: cannot start host formatting for %s", instance->config.name);
+ disable_instance(instance);
+ }
+ } else {
+ instance->skip_host = 1;
+ }
+ }
+ }
+}
+
+/**
+ * Start chart formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param st a chart.
+ */
+void start_chart_formatting(struct engine *engine, RRDSET *st)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host) {
+ if (rrdset_is_exportable(instance, st)) {
+ if (instance->start_chart_formatting && instance->start_chart_formatting(instance, st) != 0) {
+ error("EXPORTING: cannot start chart formatting for %s", instance->config.name);
+ disable_instance(instance);
+ }
+ } else {
+ instance->skip_chart = 1;
+ }
+ }
+ }
+}
+
+/**
+ * Format metric for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param rd a dimension(metric) in the Netdata database.
+ */
+void metric_formatting(struct engine *engine, RRDDIM *rd)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host && !instance->skip_chart) {
+ if (instance->metric_formatting && instance->metric_formatting(instance, rd) != 0) {
+ error("EXPORTING: cannot format metric for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ instance->stats.buffered_metrics++;
+ }
+ }
+}
+
+/**
+ * End chart formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param st a chart.
+ */
+void end_chart_formatting(struct engine *engine, RRDSET *st)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host && !instance->skip_chart) {
+ if (instance->end_chart_formatting && instance->end_chart_formatting(instance, st) != 0) {
+ error("EXPORTING: cannot end chart formatting for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ }
+ instance->skip_chart = 0;
+ }
+}
+
+/**
+ * End host formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ * @param host a data collecting host.
+ */
+void end_host_formatting(struct engine *engine, RRDHOST *host)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled && !instance->skip_host) {
+ if (instance->end_host_formatting && instance->end_host_formatting(instance, host) != 0) {
+ error("EXPORTING: cannot end host formatting for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ }
+ instance->skip_host = 0;
+ }
+}
+
+/**
+ * End batch formatting for every connector instance's buffer
+ *
+ * @param engine an engine data structure.
+ */
+void end_batch_formatting(struct engine *engine)
+{
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ if (instance->scheduled) {
+ if (instance->end_batch_formatting && instance->end_batch_formatting(instance) != 0) {
+ error("EXPORTING: cannot end batch formatting for %s", instance->config.name);
+ disable_instance(instance);
+ continue;
+ }
+ uv_mutex_unlock(&instance->mutex);
+ instance->data_is_ready = 1;
+ uv_cond_signal(&instance->cond_var);
+
+ instance->scheduled = 0;
+ instance->after = instance->before;
+ }
+ }
+}
+
+/**
+ * Prepare buffers
+ *
+ * Walk through the Netdata database and fill buffers for every scheduled exporting connector instance according to
+ * configured rules.
+ *
+ * @param engine an engine data structure.
+ */
+void prepare_buffers(struct engine *engine)
+{
+ netdata_thread_disable_cancelability();
+ start_batch_formatting(engine);
+
+ rrd_rdlock();
+ RRDHOST *host;
+ rrdhost_foreach_read(host)
+ {
+ rrdhost_rdlock(host);
+ start_host_formatting(engine, host);
+ RRDSET *st;
+ rrdset_foreach_read(st, host)
+ {
+ rrdset_rdlock(st);
+ start_chart_formatting(engine, st);
+
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st)
+ metric_formatting(engine, rd);
+
+ end_chart_formatting(engine, st);
+ rrdset_unlock(st);
+ }
+
+ end_host_formatting(engine, host);
+ rrdhost_unlock(host);
+ }
+ rrd_unlock();
+ netdata_thread_enable_cancelability();
+
+ end_batch_formatting(engine);
+}
+
+/**
+ * Flush a buffer with host labels
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int flush_host_labels(struct instance *instance, RRDHOST *host)
+{
+ (void)host;
+
+ if (instance->labels)
+ buffer_flush(instance->labels);
+
+ return 0;
+}
+
+/**
+ * End a batch for a simple connector
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int simple_connector_end_batch(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct stats *stats = &instance->stats;
+
+ BUFFER *instance_buffer = (BUFFER *)instance->buffer;
+ struct simple_connector_buffer *last_buffer = simple_connector_data->last_buffer;
+
+ if (!last_buffer->buffer) {
+ last_buffer->buffer = buffer_create(0);
+ }
+
+ if (last_buffer->used) {
+ // ring buffer is full, reuse the oldest element
+ simple_connector_data->first_buffer = simple_connector_data->first_buffer->next;
+
+ stats->data_lost_events++;
+ stats->lost_metrics += last_buffer->buffered_metrics;
+ stats->lost_bytes += last_buffer->buffered_bytes;
+ }
+
+ // swap buffers
+ BUFFER *tmp_buffer = last_buffer->buffer;
+ last_buffer->buffer = instance_buffer;
+ instance->buffer = instance_buffer = tmp_buffer;
+
+ buffer_flush(instance_buffer);
+
+ if (last_buffer->header)
+ buffer_flush(last_buffer->header);
+ else
+ last_buffer->header = buffer_create(0);
+
+ if (instance->prepare_header)
+ instance->prepare_header(instance);
+
+ // The stats->buffered_metrics is used in the simple connector batch formatting as a variable for the number
+ // of metrics, added in the current iteration, so we are clearing it here. We will use the
+ // simple_connector_data->total_buffered_metrics in the worker to show the statistics.
+ size_t buffered_metrics = (size_t)stats->buffered_metrics;
+ stats->buffered_metrics = 0;
+
+ size_t buffered_bytes = buffer_strlen(last_buffer->buffer);
+
+ last_buffer->buffered_metrics = buffered_metrics;
+ last_buffer->buffered_bytes = buffered_bytes;
+ last_buffer->used++;
+
+ simple_connector_data->total_buffered_metrics += buffered_metrics;
+ stats->buffered_bytes += buffered_bytes;
+
+ simple_connector_data->last_buffer = simple_connector_data->last_buffer->next;
+
+ return 0;
+}
diff --git a/exporting/prometheus/Makefile.am b/exporting/prometheus/Makefile.am
new file mode 100644
index 000000000..334fca81c
--- /dev/null
+++ b/exporting/prometheus/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+SUBDIRS = \
+ remote_write \
+ $(NULL)
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/prometheus/README.md b/exporting/prometheus/README.md
new file mode 100644
index 000000000..d718a366e
--- /dev/null
+++ b/exporting/prometheus/README.md
@@ -0,0 +1,461 @@
+<!--
+title: "Export metrics to Prometheus"
+description: "Export Netdata metrics to Prometheus for archiving and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/prometheus/README.md
+sidebar_label: Using Netdata with Prometheus
+-->
+
+# Using Netdata with Prometheus
+
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus has changed since Netdata v1.7. The new Prometheus
+> exporting connector for Netdata supports many more features and is aligned with the development of the rest of the
+> Netdata exporting connectors.
+
+Prometheus is a distributed monitoring system which offers a very simple setup along with a robust data model. Netdata
+recently added support for Prometheus. I'm going to quickly show you how to install both Netdata and Prometheus on the
+same server. We can then use Grafana pointed at Prometheus to obtain the long-term metrics Netdata offers. I'm assuming
+we are starting on a fresh Ubuntu shell (whether you'd like to follow along in a VM or a cloud instance is up to you).
+
+## Installing Netdata and Prometheus
+
+### Installing Netdata
+
+There are a number of ways to install Netdata, described in [Installation](/packaging/installer/README.md). The suggested
+way is to install the latest Netdata and keep it upgraded automatically, using the one-line installation:
+
+```sh
+bash <(curl -Ss https://my-netdata.io/kickstart.sh)
+```
+
+At this point we should have Netdata listening on port 19999. Point your browser to:
+
+```sh
+http://your.netdata.ip:19999
+```
+
+_(replace `your.netdata.ip` with the IP or hostname of the server running Netdata)_
+
+### Installing Prometheus
+
+In order to install Prometheus we are going to introduce our own systemd startup script along with an example
+`prometheus.yml` configuration. Prometheus needs to be pointed to your server at a specific target URL for it to scrape
+Netdata's API. Prometheus uses a pull model, meaning Netdata is the passive client within this architecture.
+Prometheus always initiates the connection with Netdata.
+
+#### Download Prometheus
+
+```sh
+cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
+| grep "browser_download_url.*linux-amd64.tar.gz" \
+| cut -d '"' -f 4 \
+| wget -qi -
+```
+
+#### Create prometheus system user
+
+```sh
+sudo useradd -r prometheus
+```
+
+#### Create prometheus directory
+
+```sh
+sudo mkdir /opt/prometheus
+sudo chown prometheus:prometheus /opt/prometheus
+```
+
+#### Untar prometheus directory
+
+```sh
+sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
+```
+
+#### Install prometheus.yml
+
+We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
+
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
+
+```yaml
+# my global config
+global:
+ scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
+ evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+ # Attach these labels to any time series or alerts when communicating with
+ # external systems (federation, remote storage, Alertmanager).
+ external_labels:
+ monitor: 'codelab-monitor'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+ # - "first.rules"
+ # - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['0.0.0.0:9090']
+
+ - job_name: 'netdata-scrape'
+
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ # format: prometheus | prometheus_all_hosts
+ # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
+ format: [prometheus]
+ #
+ # sources: as-collected | raw | average | sum | volume
+ # default is: average
+ #source: [as-collected]
+ #
+ # server name for this prometheus - the default is the client IP
+ # for Netdata to uniquely identify it
+ #server: ['prometheus1']
+ honor_labels: true
+
+ static_configs:
+ - targets: ['{your.netdata.ip}:19999']
+```
+
+#### Install nodes.yml
+
+The following is completely optional; it will enable Prometheus to generate alerts from some Netdata sources. Tweak the
+values to your own needs. We will use the `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and
+add a `- "nodes.yml"` entry under the `rule_files:` section in the example `prometheus.yml` file above.
+
+```yaml
+groups:
+- name: nodes
+
+ rules:
+ - alert: node_high_cpu_usage_70
+ expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
+ summary: CPU alert for container node '{{ $labels.job }}'
+
+ - alert: node_high_memory_usage_70
+ expr: 100 / sum(netdata_system_ram_MB_average) by (job)
+ * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
+ summary: Memory alert for container node '{{ $labels.job }}'
+
+ - alert: node_low_root_filesystem_space_20
+ expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
+ * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
+ summary: Root filesystem alert for container node '{{ $labels.job }}'
+
+ - alert: node_root_filesystem_fill_rate_6h
+ expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
+ for: 1h
+ labels:
+ severity: critical
+ annotations:
+ description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
+ summary: Disk fill alert for Swarm node '{{ $labels.job }}'
+```
+
+#### Install prometheus.service
+
+Save this service file as `/etc/systemd/system/prometheus.service`:
+
+```sh
+[Unit]
+Description=Prometheus Server
+AssertPathExists=/opt/prometheus
+
+[Service]
+Type=simple
+WorkingDirectory=/opt/prometheus
+User=prometheus
+Group=prometheus
+ExecStart=/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml --log.level=info
+ExecReload=/bin/kill -SIGHUP $MAINPID
+ExecStop=/bin/kill -SIGINT $MAINPID
+
+[Install]
+WantedBy=multi-user.target
+```
+
+##### Start Prometheus
+
+```sh
+sudo systemctl start prometheus
+sudo systemctl enable prometheus
+```
+
+Prometheus should now start and listen on port 9090. Head there with your browser.
+
+If everything is working correctly, when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click
+this and then click on 'Targets'. We should see the Netdata host as a scraped target.
+
+---
+
+## Netdata support for Prometheus
+
+> IMPORTANT: the format in which Netdata sends metrics to Prometheus has changed since Netdata v1.6. The new format allows
+> easier queries for metrics and supports both `as collected` and normalized metrics.
+
+Before explaining the changes, we have to understand the key differences between Netdata and Prometheus.
+
+### Understanding Netdata metrics
+
+#### Charts
+
+Each chart in Netdata has several properties (common to all its metrics):
+
+- `chart_id` - uniquely identifies a chart.
+
+- `chart_name` - a more human friendly name for `chart_id`, also unique.
+
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts
+ have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
+
+- `family` groups a set of charts together. It is used as the submenu of the dashboard.
+
+- `units` is the units for all the metrics attached to the chart.
+
+#### Dimensions
+
+Then each Netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of
+measurement, and are contextually in the same category (i.e. the metrics for disk bandwidth are `read` and `write`, and
+they are both in the same chart).
+
+### Netdata data source
+
+Netdata can send metrics to Prometheus from 3 data sources:
+
+- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is
+ done by Netdata. The latest value for each metric is just given to Prometheus. This is the method Prometheus prefers,
+ but it is also the hardest to work with. To work with this data source, you will need to understand how
+ to get meaningful values out of it.
+
+ The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+ If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended to the context.
+
+ Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
+ (`multiplier` and `divisor`). When the dimensions of a chart are heterogeneous, Netdata will use this
+ format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+
+- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on
+ the Netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the Netdata
+ dashboard charts. This is the easiest to work with.
+
+ The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+ When this source is used, Netdata keeps track of the last access time for each Prometheus server fetching the
+ metrics. This last access time is used at the subsequent queries of the same Prometheus server to identify the
+ time-frame the `average` will be calculated.
+
+ So, no matter how frequently Prometheus scrapes Netdata, it will get all the database data.
+ To identify each Prometheus server, Netdata uses by default the IP of the client fetching the metrics.
+
+ If there are multiple Prometheus servers fetching data from the same Netdata, using the same IP, each Prometheus
+ server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the Prometheus server.
+
+- `sum` or `volume`, is like `average` but instead of averaging the values, it sums them.
+
+ The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
+ other operations are the same with `average`.
+
+ To change the data source to `sum` or `as-collected` you need to provide the `source` parameter in the request URL.
+ e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`
+
+ Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
+
+### Querying Metrics
+
+Fetch with your web browser this URL:
+
+`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
+
+_(replace `your.netdata.ip` with the IP or hostname of your Netdata server)_
+
+Netdata will respond with all the metrics it sends to Prometheus.
+
+If you search that page for `"system.cpu"` you will find all the metrics Netdata is exporting to Prometheus for this
+chart. `system.cpu` is the chart name on the Netdata dashboard (on the Netdata dashboard all charts have a text heading
+such as: `Total CPU utilization (system.cpu)`. What we are interested in here is the chart name: `system.cpu`).
+
+Searching for `"system.cpu"` reveals:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
+```
+
+_(Netdata response for `system.cpu` with source=`average`)_
+
+In `average` or `sum` data sources, all values are normalized and are reported to Prometheus as gauges. Now, use the
+'expression' text form in Prometheus and begin typing the metric we are looking for: `netdata_system_cpu`. You should see
+that the text form begins to auto-fill as Prometheus knows about this metric.
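+
+For example, assuming the default `netdata_` prefix and the `average` data source, a query along these lines returns the
+user CPU percentage per scraped Netdata instance:
+
+```sh
+avg by (instance) (netdata_system_cpu_percentage_average{chart="system.cpu",dimension="user"})
+```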
+
+If the data source was `as collected`, the response would be:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
+```
+
+_(Netdata response for `system.cpu` with source=`as-collected`)_
+
+For more information, check the Prometheus documentation.
+
+### Streaming data from upstream hosts
+
+The `format=prometheus` parameter only exports the metrics of the host Netdata runs on. If you are using the
+parent-child functionality of Netdata, this ignores any upstream hosts, so you should consider using the following in
+your **prometheus.yml**:
+
+```yaml
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ format: [prometheus_all_hosts]
+ honor_labels: true
+```
+
+This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names
+provided.
+
+### Timestamps
+
+To pass the metrics through the Prometheus Pushgateway, Netdata supports the `&timestamps=no` option, which sends the
+metrics without timestamps.
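+
+For example, a minimal sketch that scrapes the local Agent without timestamps and pipes the output to a Pushgateway
+(the host names, ports, and job name are illustrative):
+
+```sh
+curl -s 'http://localhost:19999/api/v1/allmetrics?format=prometheus&timestamps=no' \
+  | curl --data-binary @- 'http://pushgateway.example.com:9091/metrics/job/netdata'
+```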
+
+## Netdata host variables
+
+Netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of
+files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to Prometheus by default.
+
+To expose them, append `variables=yes` to the Netdata URL.
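+
+For example (assuming the Agent listens on the default port `19999`):
+
+```sh
+curl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&variables=yes'
+```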
+
+### TYPE and HELP
+
+To save bandwidth, and because Prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If
+desired, they can be re-enabled via `types=yes` and `help=yes`, e.g.
+`/api/v1/allmetrics?format=prometheus&types=yes&help=yes`.
+
+Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against the Prometheus documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).
+
+### Names and IDs
+
+Netdata supports names and IDs for charts and dimensions. Usually, IDs are unique identifiers as read by the system,
+and names are human-friendly labels (also unique).
+
+Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper,
+interrupts, QoS classes, statsd synthetic charts, etc.
+
+The default is controlled in `exporting.conf`:
+
+```conf
+[prometheus:exporter]
+ send names instead of ids = yes | no
+```
+
+You can override it from Prometheus by appending to the URL:
+
+- `&names=no` to get IDs (the old behaviour)
+- `&names=yes` to get names
+
+### Filtering metrics sent to Prometheus
+
+Netdata can filter the metrics it sends to Prometheus with this setting:
+
+```conf
+[prometheus:exporter]
+ send charts matching = *
+```
+
+This setting accepts a space-separated list of [simple patterns](/libnetdata/simple_pattern/README.md) to match the
+**charts** to be sent to Prometheus. Each pattern can use `*` as a wildcard, any number of times (e.g. `*a*b*c*` is
+valid). Patterns starting with `!` give a negative match (e.g. `!*.bad users.* groups.*` sends all the users and groups
+except the `bad` user and `bad` group). The order is important: the first match (positive or negative), left to right,
+is used.
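+
+For example, the following illustrative configuration sends all `system.*` and `apps.*` charts except `system.uptime`
+(the chart names are just examples):
+
+```conf
+[prometheus:exporter]
+    send charts matching = !system.uptime system.* apps.*
+```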
+
+### Changing the prefix of Netdata metrics
+
+Netdata sends all metrics prefixed with `netdata_`. You can change this in `exporting.conf`, like this:
+
+```conf
+[prometheus:exporter]
+ prefix = netdata
+```
+
+It can also be changed from the URL, by appending `&prefix=netdata`.
+
+### Metric Units
+
+The default source `average` adds the unit of measurement to the name of each metric (e.g. `_KiB_persec`). To hide the
+units and get the same metric names as with the other sources, append `&hideunits=yes` to the URL.
+
+The units were standardized in v1.12, with the effect of changing the metric names. To get the metric names as they
+were before v1.12, append `&oldunits=yes` to the URL.
+
+### Accuracy of `average` and `sum` data sources
+
+When the data source is set to `average` or `sum`, Netdata remembers the last access of each client accessing the
+Prometheus metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the
+database since that time. This means that Prometheus servers do not lose data when they access Netdata with data source
+= `average` or `sum`.
+
+To uniquely identify each Prometheus server, Netdata uses the IP of the client accessing the metrics. If, however, the
+IP is not sufficient to identify a single Prometheus server (e.g. when Prometheus servers are accessing Netdata through
+a web proxy, or when multiple Prometheus servers are NATed to a single IP), each Prometheus server may append
+`&server=NAME` to the URL. This `NAME` is used by Netdata to uniquely identify each Prometheus server and keep track of
+its last access time.
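+
+For example, a scrape job could identify itself as in the sketch below (the job name, server name, and target are
+illustrative):
+
+```yaml
+  - job_name: 'netdata'
+    metrics_path: '/api/v1/allmetrics'
+    params:
+      format: [prometheus]
+      server: ['prometheus-1']
+    static_configs:
+      - targets: ['netdata.example.com:19999']
+```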
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fprometheus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c
new file mode 100644
index 000000000..371f5a520
--- /dev/null
+++ b/exporting/prometheus/prometheus.c
@@ -0,0 +1,916 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define EXPORTINGS_INTERNALS
+#include "prometheus.h"
+
+// ----------------------------------------------------------------------------
+// PROMETHEUS
+// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts
+
+/**
+ * Check if a chart can be sent to Prometheus
+ *
+ * @param instance an instance data structure.
+ * @param st a chart.
+ * @return Returns 1 if the chart can be sent, 0 otherwise.
+ */
+inline int can_send_rrdset(struct instance *instance, RRDSET *st)
+{
+ RRDHOST *host = st->rrdhost;
+
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_BACKEND_IGNORE)))
+ return 0;
+
+ if (unlikely(!rrdset_flag_check(st, RRDSET_FLAG_BACKEND_SEND))) {
+ // we have not checked this chart
+ if (simple_pattern_matches(instance->config.charts_pattern, st->id) ||
+ simple_pattern_matches(instance->config.charts_pattern, st->name))
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_SEND);
+ else {
+ rrdset_flag_set(st, RRDSET_FLAG_BACKEND_IGNORE);
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending chart '%s' of host '%s', because it is disabled for exporting.",
+ st->id,
+ host->hostname);
+ return 0;
+ }
+ }
+
+ if (unlikely(!rrdset_is_available_for_backends(st))) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending chart '%s' of host '%s', because it is not available for exporting.",
+ st->id,
+ host->hostname);
+ return 0;
+ }
+
+ if (unlikely(
+ st->rrd_memory_mode == RRD_MEMORY_MODE_NONE &&
+ !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending chart '%s' of host '%s' because its memory mode is '%s' and the exporting connector requires database access.",
+ st->id,
+ host->hostname,
+ rrd_memory_mode_name(host->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct prometheus_server {
+ const char *server;
+ uint32_t hash;
+ RRDHOST *host;
+ time_t last_access;
+ struct prometheus_server *next;
+} *prometheus_server_root = NULL;
+
+static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
+
+/**
+ * Clean server root local structure
+ */
+void prometheus_clean_server_root()
+{
+ if (prometheus_server_root) {
+ netdata_mutex_lock(&prometheus_server_root_mutex);
+
+ struct prometheus_server *ps;
+ for (ps = prometheus_server_root; ps; ) {
+ struct prometheus_server *current = ps;
+ ps = ps->next;
+ if(current->server)
+ freez((void *)current->server);
+
+ freez(current);
+ }
+ prometheus_server_root = NULL;
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ }
+}
+
+/**
+ * Get the last time when a Prometheus server scraped the Netdata Prometheus exporter.
+ *
+ * @param server the name of the Prometheus server.
+ * @param host a data collecting host.
+ * @param now the current time.
+ * @return Returns the last time when the server accessed Netdata, or 0 if it is the first occurrence.
+ */
+static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now)
+{
+#ifdef UNIT_TESTING
+ return 0;
+#endif
+ uint32_t hash = simple_hash(server);
+
+ netdata_mutex_lock(&prometheus_server_root_mutex);
+
+ struct prometheus_server *ps;
+ for (ps = prometheus_server_root; ps; ps = ps->next) {
+ if (host == ps->host && hash == ps->hash && !strcmp(server, ps->server)) {
+ time_t last = ps->last_access;
+ ps->last_access = now;
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return last;
+ }
+ }
+
+ ps = callocz(1, sizeof(struct prometheus_server));
+ ps->server = strdupz(server);
+ ps->hash = hash;
+ ps->host = host;
+ ps->last_access = now;
+ ps->next = prometheus_server_root;
+ prometheus_server_root = ps;
+
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return 0;
+}
+
+/**
+ * Copy and sanitize name.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
+inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
+{
+ size_t n;
+
+ for (n = 0; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (!isalnum(c))
+ *d = '_';
+ else
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
+/**
+ * Copy and sanitize label.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
+inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
+{
+ size_t n;
+
+ // make sure we can escape one character without overflowing the buffer
+ usable--;
+
+ for (n = 0; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (unlikely(c == '"' || c == '\\' || c == '\n')) {
+ *d++ = '\\';
+ n++;
+ }
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
+
+/**
+ * Copy and sanitize units.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @param showoldunits set this flag to 1 to show old (before v1.12) units.
+ * @return Returns the destination string.
+ */
+inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits)
+{
+ const char *sorig = s;
+ char *ret = d;
+ size_t n;
+
+ // Fix for issue 5227
+ if (unlikely(showoldunits)) {
+ static struct {
+ const char *newunit;
+ uint32_t hash;
+ const char *oldunit;
+ } units[] = { { "KiB/s", 0, "kilobytes/s" },
+ { "MiB/s", 0, "MB/s" },
+ { "GiB/s", 0, "GB/s" },
+ { "KiB", 0, "KB" },
+ { "MiB", 0, "MB" },
+ { "GiB", 0, "GB" },
+ { "inodes", 0, "Inodes" },
+ { "percentage", 0, "percent" },
+ { "faults/s", 0, "page faults/s" },
+ { "KiB/operation", 0, "kilobytes per operation" },
+ { "milliseconds/operation", 0, "ms per operation" },
+ { NULL, 0, NULL } };
+ static int initialized = 0;
+ int i;
+
+ if (unlikely(!initialized)) {
+ for (i = 0; units[i].newunit; i++)
+ units[i].hash = simple_hash(units[i].newunit);
+ initialized = 1;
+ }
+
+ uint32_t hash = simple_hash(s);
+ for (i = 0; units[i].newunit; i++) {
+ if (unlikely(hash == units[i].hash && !strcmp(s, units[i].newunit))) {
+ // info("matched extension for filename '%s': '%s'", filename, last_dot);
+ s = units[i].oldunit;
+ sorig = s;
+ break;
+ }
+ }
+ }
+ *d++ = '_';
+ for (n = 1; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (!isalnum(c))
+ *d = '_';
+ else
+ *d = c;
+ }
+
+ if (n == 2 && sorig[0] == '%') {
+ n = 0;
+ d = ret;
+ s = "_percent";
+ for (; *s && n < usable; n++)
+ *d++ = *s++;
+ } else if (n > 3 && sorig[n - 3] == '/' && sorig[n - 2] == 's') {
+ n = n - 2;
+ d -= 2;
+ s = "_persec";
+ for (; *s && n < usable; n++)
+ *d++ = *s++;
+ }
+
+ *d = '\0';
+
+ return ret;
+}
+
+/**
+ * Format host labels for the Prometheus exporter
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ */
+void format_host_labels_prometheus(struct instance *instance, RRDHOST *host)
+{
+ if (unlikely(!sending_labels_configured(instance)))
+ return;
+
+ if (!instance->labels)
+ instance->labels = buffer_create(1024);
+
+ int count = 0;
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char key[PROMETHEUS_ELEMENT_MAX + 1];
+ char value[PROMETHEUS_ELEMENT_MAX + 1];
+
+ prometheus_name_copy(key, label->key, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(value, label->value, PROMETHEUS_ELEMENT_MAX);
+
+ if (*key && *value) {
+ if (count > 0)
+ buffer_strcat(instance->labels, ",");
+ buffer_sprintf(instance->labels, "%s=\"%s\"", key, value);
+ count++;
+ }
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+}
+
+struct host_variables_callback_options {
+ RRDHOST *host;
+ BUFFER *wb;
+ EXPORTING_OPTIONS exporting_options;
+ PROMETHEUS_OUTPUT_OPTIONS output_options;
+ const char *prefix;
+ const char *labels;
+ time_t now;
+ int host_header_printed;
+ char name[PROMETHEUS_VARIABLE_MAX + 1];
+};
+
+/**
+ * Print host variables.
+ *
+ * @param rv a variable.
+ * @param data callback options.
+ * @return Returns 1 if the variable was written to the buffer, 0 otherwise.
+ */
+static int print_host_variables(RRDVAR *rv, void *data)
+{
+ struct host_variables_callback_options *opts = data;
+
+ if (rv->options & (RRDVAR_OPTION_CUSTOM_HOST_VAR | RRDVAR_OPTION_CUSTOM_CHART_VAR)) {
+ if (!opts->host_header_printed) {
+ opts->host_header_printed = 1;
+
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP) {
+ buffer_sprintf(opts->wb, "\n# COMMENT global host and chart variables\n");
+ }
+ }
+
+ calculated_number value = rrdvar2number(rv);
+ if (isnan(value) || isinf(value)) {
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP)
+ buffer_sprintf(
+ opts->wb, "# COMMENT variable \"%s\" is %s. Skipped.\n", rv->name, (isnan(value)) ? "NAN" : "INF");
+
+ return 0;
+ }
+
+ char *label_pre = "";
+ char *label_post = "";
+ if (opts->labels && *opts->labels) {
+ label_pre = "{";
+ label_post = "}";
+ }
+
+ prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
+
+ if (opts->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT " %llu\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ opts->labels,
+ label_post,
+ value,
+ opts->now * 1000ULL);
+ else
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " CALCULATED_NUMBER_FORMAT "\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ opts->labels,
+ label_post,
+ value);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+struct gen_parameters {
+ const char *prefix;
+ char *context;
+ char *suffix;
+
+ char *chart;
+ char *dimension;
+ char *family;
+ char *labels;
+
+ PROMETHEUS_OUTPUT_OPTIONS output_options;
+ RRDSET *st;
+ RRDDIM *rd;
+
+ const char *relation;
+ const char *type;
+};
+
+/**
+ * Write an as-collected help comment to a buffer.
+ *
+ * @param wb the buffer to write the comment to.
+ * @param p parameters for generating the comment string.
+ * @param homogeneous a flag for homogeneous charts.
+ * @param prometheus_collector a flag for metrics from prometheus collector.
+ */
+static void generate_as_collected_prom_help(BUFFER *wb, struct gen_parameters *p, int homogeneous, int prometheus_collector)
+{
+ buffer_sprintf(wb, "# COMMENT %s_%s", p->prefix, p->context);
+
+ if (!homogeneous)
+ buffer_sprintf(wb, "_%s", p->dimension);
+
+ buffer_sprintf(
+ wb,
+ "%s: chart \"%s\", context \"%s\", family \"%s\", dimension \"%s\", value * ",
+ p->suffix,
+ (p->output_options & PROMETHEUS_OUTPUT_NAMES && p->st->name) ? p->st->name : p->st->id,
+ p->st->context,
+ p->st->family,
+ (p->output_options & PROMETHEUS_OUTPUT_NAMES && p->rd->name) ? p->rd->name : p->rd->id);
+
+ if (prometheus_collector)
+ buffer_sprintf(wb, "1 / 1");
+ else
+ buffer_sprintf(wb, COLLECTED_NUMBER_FORMAT " / " COLLECTED_NUMBER_FORMAT, p->rd->multiplier, p->rd->divisor);
+
+ buffer_sprintf(wb, " %s %s (%s)\n", p->relation, p->st->units, p->type);
+}
+
+/**
+ * Write an as-collected metric to a buffer.
+ *
+ * @param wb the buffer to write the metric to.
+ * @param p parameters for generating the metric string.
+ * @param homogeneous a flag for homogeneous charts.
+ * @param prometheus_collector a flag for metrics from prometheus collector.
+ */
+static void generate_as_collected_prom_metric(BUFFER *wb, struct gen_parameters *p, int homogeneous, int prometheus_collector)
+{
+ buffer_sprintf(wb, "%s_%s", p->prefix, p->context);
+
+ if (!homogeneous)
+ buffer_sprintf(wb, "_%s", p->dimension);
+
+ buffer_sprintf(wb, "%s{chart=\"%s\",family=\"%s\"", p->suffix, p->chart, p->family);
+
+ if (homogeneous)
+ buffer_sprintf(wb, ",dimension=\"%s\"", p->dimension);
+
+ buffer_sprintf(wb, "%s} ", p->labels);
+
+ if (prometheus_collector)
+ buffer_sprintf(
+ wb,
+ CALCULATED_NUMBER_FORMAT,
+ (calculated_number)p->rd->last_collected_value * (calculated_number)p->rd->multiplier /
+ (calculated_number)p->rd->divisor);
+ else
+ buffer_sprintf(wb, COLLECTED_NUMBER_FORMAT, p->rd->last_collected_value);
+
+ if (p->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, " %llu\n", timeval_msec(&p->rd->last_collected_time));
+ else
+ buffer_sprintf(wb, "\n");
+}
+
+/**
+ * Write metrics in Prometheus format to a buffer.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param wb the buffer to fill with metrics.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param allhosts set to 1 if host instance should be in the output for tags.
+ * @param output_options options to configure the format of the output.
+ */
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(
+ struct instance *instance,
+ RRDHOST *host,
+ BUFFER *wb,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ int allhosts,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ rrdhost_rdlock(host);
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
+
+ format_host_labels_prometheus(instance, host);
+
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ wb,
+ "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1 %llu\n",
+ hostname,
+ host->program_name,
+ host->program_version,
+ now_realtime_usec() / USEC_PER_MS);
+ else
+ buffer_sprintf(
+ wb,
+ "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"} 1\n",
+ hostname,
+ host->program_name,
+ host->program_version);
+
+ char labels[PROMETHEUS_LABELS_MAX + 1] = "";
+ if (allhosts) {
+ if (instance->labels && buffer_tostring(instance->labels)) {
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags_info{instance=\"%s\",%s} 1 %llu\n",
+ hostname,
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags{instance=\"%s\",%s} 1 %llu\n",
+ hostname,
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+ } else {
+ buffer_sprintf(
+ wb, "netdata_host_tags_info{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(
+ wb, "netdata_host_tags{instance=\"%s\",%s} 1\n", hostname, buffer_tostring(instance->labels));
+ }
+ }
+
+ snprintfz(labels, PROMETHEUS_LABELS_MAX, ",instance=\"%s\"", hostname);
+ } else {
+ if (instance->labels && buffer_tostring(instance->labels)) {
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS) {
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags_info{%s} 1 %llu\n",
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(
+ wb,
+ "netdata_host_tags{%s} 1 %llu\n",
+ buffer_tostring(instance->labels),
+ now_realtime_usec() / USEC_PER_MS);
+ } else {
+ buffer_sprintf(wb, "netdata_host_tags_info{%s} 1\n", buffer_tostring(instance->labels));
+
+ // deprecated, exists only for compatibility with older queries
+ buffer_sprintf(wb, "netdata_host_tags{%s} 1\n", buffer_tostring(instance->labels));
+ }
+ }
+ }
+
+ if (instance->labels)
+ buffer_flush(instance->labels);
+
+ // send custom variables set for the host
+ if (output_options & PROMETHEUS_OUTPUT_VARIABLES) {
+ struct host_variables_callback_options opts = { .host = host,
+ .wb = wb,
+ .labels = (labels[0] == ',') ? &labels[1] : labels,
+ .exporting_options = exporting_options,
+ .output_options = output_options,
+ .prefix = prefix,
+ .now = now_realtime_sec(),
+ .host_header_printed = 0 };
+ foreach_host_variable_callback(host, print_host_variables, &opts);
+ }
+
+ // for each chart
+ RRDSET *st;
+ rrdset_foreach_read(st, host)
+ {
+
+ if (likely(can_send_rrdset(instance, st))) {
+ rrdset_rdlock(st);
+
+ char chart[PROMETHEUS_ELEMENT_MAX + 1];
+ char context[PROMETHEUS_ELEMENT_MAX + 1];
+ char family[PROMETHEUS_ELEMENT_MAX + 1];
+ char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+ prometheus_label_copy(
+ chart, (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
+ int as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ int homogeneous = 1;
+ int prometheus_collector = 0;
+ if (as_collected) {
+ if (rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if (rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+
+ if (st->module_name && !strcmp(st->module_name, "prometheus"))
+ prometheus_collector = 1;
+ } else {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE &&
+ !(output_options & PROMETHEUS_OUTPUT_HIDEUNITS))
+ prometheus_units_copy(
+ units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & PROMETHEUS_OUTPUT_OLDUNITS);
+ }
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(
+ wb,
+ "\n# COMMENT %s chart \"%s\", context \"%s\", family \"%s\", units \"%s\"\n",
+ (homogeneous) ? "homogeneous" : "heterogeneous",
+ (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ? st->name : st->id,
+ st->context,
+ st->family,
+ st->units);
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st)
+ {
+ if (rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ struct gen_parameters p;
+ p.prefix = prefix;
+ p.context = context;
+ p.suffix = suffix;
+ p.chart = chart;
+ p.dimension = dimension;
+ p.family = family;
+ p.labels = labels;
+ p.output_options = output_options;
+ p.st = st;
+ p.rd = rd;
+
+ if (unlikely(rd->last_collected_time.tv_sec < instance->after))
+ continue;
+
+ p.type = "gauge";
+ p.relation = "gives";
+ if (rd->algorithm == RRD_ALGORITHM_INCREMENTAL ||
+ rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) {
+ p.type = "counter";
+ p.relation = "delta gives";
+ p.suffix = "_total";
+ }
+
+ if (homogeneous) {
+                        // all the dimensions of the chart have the same algorithm, multiplier and divisor
+ // we add all dimensions as labels
+
+ prometheus_label_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ generate_as_collected_prom_help(wb, &p, homogeneous, prometheus_collector);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(wb, "# TYPE %s_%s%s %s\n", prefix, context, suffix, p.type);
+
+ generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector);
+ } else {
+                        // the dimensions of the chart do not have the same algorithm, multiplier or divisor
+ // we create a metric per dimension
+
+ prometheus_name_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ generate_as_collected_prom_help(wb, &p, homogeneous, prometheus_collector);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(
+ wb, "# TYPE %s_%s_%s%s %s\n", prefix, context, dimension, suffix, p.type);
+
+ generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector);
+ }
+ } else {
+ // we need average or sum of the data
+
+ time_t first_time = instance->after;
+ time_t last_time = instance->before;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_time);
+
+ if (!isnan(value) && !isinf(value)) {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ prometheus_label_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
+ buffer_sprintf(
+ wb,
+ "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id,
+ st->units,
+ (unsigned long long)first_time,
+ (unsigned long long)last_time);
+
+ if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
+ buffer_sprintf(wb, "# TYPE %s_%s%s%s gauge\n", prefix, context, units, suffix);
+
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ wb,
+ "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT
+ " %llu\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ chart,
+ family,
+ dimension,
+ labels,
+ value,
+ last_time * MSEC_PER_SEC);
+ else
+ buffer_sprintf(
+ wb,
+ "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " CALCULATED_NUMBER_FORMAT
+ "\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ chart,
+ family,
+ dimension,
+ labels,
+ value);
+ }
+ }
+ }
+ }
+
+ rrdset_unlock(st);
+ }
+ }
+
+ rrdhost_unlock(host);
+}
+
+/**
+ * Get the last time when a server accessed Netdata. Write information about an API request to a buffer.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param exporting_options options to configure what data is exported.
+ * @param server the name of a Prometheus server.
+ * @param now the current time.
+ * @param output_options options to configure the format of the output.
+ * @return Returns the last time when the server accessed Netdata.
+ */
+static inline time_t prometheus_preparation(
+ struct instance *instance,
+ RRDHOST *host,
+ BUFFER *wb,
+ EXPORTING_OPTIONS exporting_options,
+ const char *server,
+ time_t now,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (!server || !*server)
+ server = "default";
+
+ time_t after = prometheus_server_last_access(server, host, now);
+
+ int first_seen = 0;
+ if (!after) {
+ after = now - instance->config.update_every;
+ first_seen = 1;
+ }
+
+ if (after > now) {
+ // oops! this should never happen
+ after = now - instance->config.update_every;
+ }
+
+ if (output_options & PROMETHEUS_OUTPUT_HELP) {
+ char *mode;
+ if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ mode = "as collected";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ mode = "average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(exporting_options) == EXPORTING_SOURCE_DATA_SUM)
+ mode = "sum";
+ else
+ mode = "unknown";
+
+ buffer_sprintf(
+ wb,
+ "# COMMENT netdata \"%s\" to %sprometheus \"%s\", source \"%s\", last seen %lu %s, time range %lu to %lu\n\n",
+ host->hostname,
+ (first_seen) ? "FIRST SEEN " : "",
+ server,
+ mode,
+ (unsigned long)((first_seen) ? 0 : (now - after)),
+ (first_seen) ? "never" : "seconds ago",
+ (unsigned long)after,
+ (unsigned long)now);
+ }
+
+ return after;
+}
+
+/**
+ * Write metrics and auxiliary information for one host to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
+void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ RRDHOST *host,
+ BUFFER *wb,
+ const char *server,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (unlikely(!prometheus_exporter_instance))
+ return;
+
+ prometheus_exporter_instance->before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ prometheus_exporter_instance->after = prometheus_preparation(
+ prometheus_exporter_instance,
+ host,
+ wb,
+ exporting_options,
+ server,
+ prometheus_exporter_instance->before,
+ output_options);
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus(
+ prometheus_exporter_instance, host, wb, prefix, exporting_options, 0, output_options);
+}
+
+/**
+ * Write metrics and auxiliary information for all hosts to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
+void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
+ RRDHOST *host,
+ BUFFER *wb,
+ const char *server,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (unlikely(!prometheus_exporter_instance))
+ return;
+
+ prometheus_exporter_instance->before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ prometheus_exporter_instance->after = prometheus_preparation(
+ prometheus_exporter_instance,
+ host,
+ wb,
+ exporting_options,
+ server,
+ prometheus_exporter_instance->before,
+ output_options);
+
+ rrd_rdlock();
+ rrdhost_foreach_read(host)
+ {
+ rrd_stats_api_v1_charts_allmetrics_prometheus(
+ prometheus_exporter_instance, host, wb, prefix, exporting_options, 1, output_options);
+ }
+ rrd_unlock();
+}
diff --git a/exporting/prometheus/prometheus.h b/exporting/prometheus/prometheus.h
new file mode 100644
index 000000000..2f0845ce9
--- /dev/null
+++ b/exporting/prometheus/prometheus.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PROMETHEUS_H
+#define NETDATA_EXPORTING_PROMETHEUS_H 1
+
+#include "exporting/exporting_engine.h"
+
+#define PROMETHEUS_ELEMENT_MAX 256
+#define PROMETHEUS_LABELS_MAX 1024
+#define PROMETHEUS_VARIABLE_MAX 256
+
+#define PROMETHEUS_LABELS_MAX_NUMBER 128
+
+typedef enum prometheus_output_flags {
+ PROMETHEUS_OUTPUT_NONE = 0,
+ PROMETHEUS_OUTPUT_HELP = (1 << 0),
+ PROMETHEUS_OUTPUT_TYPES = (1 << 1),
+ PROMETHEUS_OUTPUT_NAMES = (1 << 2),
+ PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
+ PROMETHEUS_OUTPUT_VARIABLES = (1 << 4),
+ PROMETHEUS_OUTPUT_OLDUNITS = (1 << 5),
+ PROMETHEUS_OUTPUT_HIDEUNITS = (1 << 6)
+} PROMETHEUS_OUTPUT_OPTIONS;
+
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
+ EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+extern void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
+ RRDHOST *host, BUFFER *wb, const char *server, const char *prefix,
+ EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
+
+int can_send_rrdset(struct instance *instance, RRDSET *st);
+size_t prometheus_name_copy(char *d, const char *s, size_t usable);
+size_t prometheus_label_copy(char *d, const char *s, size_t usable);
+char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits);
+
+void format_host_labels_prometheus(struct instance *instance, RRDHOST *host);
+
+extern void prometheus_clean_server_root();
+
+#endif //NETDATA_EXPORTING_PROMETHEUS_H
diff --git a/exporting/prometheus/remote_write/Makefile.am b/exporting/prometheus/remote_write/Makefile.am
new file mode 100644
index 000000000..d049ef48c
--- /dev/null
+++ b/exporting/prometheus/remote_write/Makefile.am
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+CLEANFILES = \
+ remote_write.pb.cc \
+ remote_write.pb.h \
+ $(NULL)
+
+dist_noinst_DATA = \
+ remote_write.proto \
+ README.md \
+ $(NULL)
diff --git a/exporting/prometheus/remote_write/README.md b/exporting/prometheus/remote_write/README.md
new file mode 100644
index 000000000..fe901024b
--- /dev/null
+++ b/exporting/prometheus/remote_write/README.md
@@ -0,0 +1,51 @@
+<!--
+title: "Export metrics to Prometheus remote write providers"
+description: "Send Netdata metrics to your choice of more than 20 external storage providers for long-term archiving and further analysis."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/prometheus/remote_write/README.md
+sidebar_label: Prometheus remote write
+-->
+
+# Prometheus remote write exporting connector
+
+The Prometheus remote write exporting connector uses the exporting engine to send Netdata metrics to your choice of more
+than 20 external storage providers for long-term archiving and further analysis.
+
+## Prerequisites
+
+To use the Prometheus remote write API with [storage
+providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), install
+[protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries.
+Next, [reinstall Netdata](/packaging/installer/REINSTALL.md), which detects that the required libraries and utilities
+are now available.
+
+## Configuration
+
+To enable data exporting to a storage provider using the Prometheus remote write API, run `./edit-config exporting.conf`
+in the Netdata configuration directory and set the following options:
+
+```conf
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = example.domain:example_port
+ remote write URL path = /receive
+```
+
+You can also add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example:
+`prometheus_remote_write:https:my_instance`.
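+
+A full TLS-enabled section might look like this sketch (the section name, destination, and path are illustrative):
+
+```conf
+[prometheus_remote_write:https:my_instance]
+    enabled = yes
+    destination = example.domain:example_port
+    remote write URL path = /receive
+```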
+
+`remote write URL path` is used to set an endpoint path for the remote write protocol. The default value is `/receive`.
+For example, if your endpoint is `http://example.domain:example_port/storage/read`:
+
+```conf
+ destination = example.domain:example_port
+ remote write URL path = /storage/read
+```
+
+The `buffered` and `lost` dimensions in the Netdata Exporting Connector Data Size operation monitoring chart estimate
+the uncompressed buffer size on failures.
+
+## Notes
+
+The remote write exporting connector does not support `buffer on failures`.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fprometheus%2Fremote_write%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c
new file mode 100644
index 000000000..30bd05ad7
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "remote_write.h"
+
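+// Per-chart state shared between the chart and dimension formatting callbacks below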
+static int as_collected;
+static int homogeneous;
+char context[PROMETHEUS_ELEMENT_MAX + 1];
+char chart[PROMETHEUS_ELEMENT_MAX + 1];
+char family[PROMETHEUS_ELEMENT_MAX + 1];
+char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+/**
+ * Prepare HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void prometheus_remote_write_prepare_header(struct instance *instance)
+{
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ instance->config.connector_specific_config;
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST %s HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Accept: */*\r\n"
+ "Content-Encoding: snappy\r\n"
+ "Content-Type: application/x-protobuf\r\n"
+ "X-Prometheus-Remote-Write-Version: 0.1.0\r\n"
+ "Content-Length: %zu\r\n"
+ "\r\n",
+ connector_specific_config->remote_write_path,
+ instance->config.destination,
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+}
+
+/**
+ * Process a response received after the Prometheus remote write connector has sent data
+ *
+ * @param buffer a response from a remote service.
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int process_prometheus_remote_write_response(BUFFER *buffer, struct instance *instance)
+{
+ if (unlikely(!buffer))
+ return 1;
+
+ const char *s = buffer_tostring(buffer);
+ int len = buffer_strlen(buffer);
+
+ // do nothing with HTTP responses 200 or 204
+
+ while (!isspace(*s) && len) {
+ s++;
+ len--;
+ }
+ s++;
+ len--;
+
+ if (likely(len > 4 && (!strncmp(s, "200 ", 4) || !strncmp(s, "204 ", 4))))
+ return 0;
+ else
+ return exporting_discard_response(buffer, instance);
+}
+
+/**
+ * Release specific data allocated.
+ *
+ * @param instance an instance data structure.
+ */
+void clean_prometheus_remote_write(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+ freez(simple_connector_data->connector_specific_data);
+
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ instance->config.connector_specific_config;
+ freez(connector_specific_config->remote_write_path);
+}
+
+/**
+ * Initialize Prometheus Remote Write connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_prometheus_remote_write_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_prometheus_remote_write;
+ instance->start_chart_formatting = format_chart_prometheus_remote_write;
+ instance->metric_formatting = format_dimension_prometheus_remote_write;
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = NULL;
+ instance->end_batch_formatting = format_batch_prometheus_remote_write;
+
+ instance->prepare_header = prometheus_remote_write_prepare_header;
+ instance->check_response = process_prometheus_remote_write_response;
+
+ instance->buffer = (void *)buffer_create(0);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+
+#ifdef ENABLE_HTTPS
+ simple_connector_data->flags = NETDATA_SSL_START;
+ simple_connector_data->conn = NULL;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ }
+#endif
+
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ callocz(1, sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+
+ simple_connector_init(instance);
+
+ connector_specific_data->write_request = init_write_request();
+
+ instance->engine->protocol_buffers_initialized = 1;
+
+ return 0;
+}
+
+/**
+ * Format host data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_prometheus_remote_write(struct instance *instance, RRDHOST *host)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(
+ hostname,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ PROMETHEUS_ELEMENT_MAX);
+
+ add_host_info(
+ connector_specific_data->write_request,
+ "netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
+
+ if (unlikely(sending_labels_configured(instance))) {
+ rrdhost_check_rdlock(host);
+ netdata_rwlock_rdlock(&host->labels.labels_rwlock);
+ for (struct label *label = host->labels.head; label; label = label->next) {
+ if (!should_send_label(instance, label))
+ continue;
+
+ char key[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_name_copy(key, label->key, PROMETHEUS_ELEMENT_MAX);
+
+ char value[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(value, label->value, PROMETHEUS_ELEMENT_MAX);
+
+ add_label(connector_specific_data->write_request, key, value);
+ }
+ netdata_rwlock_unlock(&host->labels.labels_rwlock);
+ }
+
+ return 0;
+}
+
+/**
+ * Format chart data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param st a chart.
+ * @return Always returns 0.
+ */
+int format_chart_prometheus_remote_write(struct instance *instance, RRDSET *st)
+{
+ prometheus_label_copy(
+ chart,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? st->name : st->id,
+ PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+
+ as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ homogeneous = 1;
+ if (as_collected) {
+ if (rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if (rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+ } else {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Format dimension data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *rd)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ if (rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char name[PROMETHEUS_LABELS_MAX + 1];
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+ RRDHOST *host = rd->rrdset->rrdhost;
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ if (unlikely(rd->last_collected_time.tv_sec < instance->after)) {
+ debug(
+ D_BACKEND,
+ "EXPORTING: not sending dimension '%s' of chart '%s' from host '%s', "
+ "its last data collection (%lu) is not within our timeframe (%lu to %lu)",
+ rd->id, rd->rrdset->id,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ (unsigned long)rd->last_collected_time.tv_sec,
+ (unsigned long)instance->after,
+ (unsigned long)instance->before);
+ return 0;
+ }
+
+ if (homogeneous) {
+                // all the dimensions of the chart have the same algorithm, multiplier and divisor
+ // we add all dimensions as labels
+
+ prometheus_label_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, dimension,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ rd->last_collected_value, timeval_msec(&rd->last_collected_time));
+ } else {
+                // the dimensions of the chart do not have the same algorithm, multiplier or divisor
+ // we create a metric per dimension
+
+ prometheus_name_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(
+ name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", instance->config.prefix, context, dimension,
+ suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, NULL,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ rd->last_collected_value, timeval_msec(&rd->last_collected_time));
+ }
+ } else {
+ // we need average or sum of the data
+
+ time_t last_t = instance->before;
+ calculated_number value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if (!isnan(value) && !isinf(value)) {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ prometheus_label_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id,
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(
+ name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", instance->config.prefix, context, units, suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, dimension,
+ (host == localhost) ? instance->config.hostname : host->hostname,
+ value, last_t * MSEC_PER_SEC);
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Format a batch for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int format_batch_prometheus_remote_write(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ size_t data_size = get_write_request_size(connector_specific_data->write_request);
+
+ if (unlikely(!data_size)) {
+ error("EXPORTING: write request size is out of range");
+ return 1;
+ }
+
+ BUFFER *buffer = instance->buffer;
+
+ buffer_need_bytes(buffer, data_size);
+ if (unlikely(pack_and_clear_write_request(connector_specific_data->write_request, buffer->buffer, &data_size))) {
+ error("EXPORTING: cannot pack write request");
+ return 1;
+ }
+ buffer->len = data_size;
+ instance->stats.buffered_bytes = (collected_number)buffer_strlen(buffer);
+
+ simple_connector_end_batch(instance);
+
+ return 0;
+}
diff --git a/exporting/prometheus/remote_write/remote_write.h b/exporting/prometheus/remote_write/remote_write.h
new file mode 100644
index 000000000..d738f5126
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write.h
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_H
+#define NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/prometheus/prometheus.h"
+#include "remote_write_request.h"
+
+struct prometheus_remote_write_specific_data {
+ void *write_request;
+};
+
+int init_prometheus_remote_write_instance(struct instance *instance);
+extern void clean_prometheus_remote_write(struct instance *instance);
+
+int format_host_prometheus_remote_write(struct instance *instance, RRDHOST *host);
+int format_chart_prometheus_remote_write(struct instance *instance, RRDSET *st);
+int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *rd);
+int format_batch_prometheus_remote_write(struct instance *instance);
+
+void prometheus_remote_write_prepare_header(struct instance *instance);
+int process_prometheus_remote_write_response(BUFFER *buffer, struct instance *instance);
+
+#endif //NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_H
diff --git a/exporting/prometheus/remote_write/remote_write.proto b/exporting/prometheus/remote_write/remote_write.proto
new file mode 100644
index 000000000..dfde254e1
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+package prometheus;
+
+option cc_enable_arenas = true;
+
+import "google/protobuf/descriptor.proto";
+
+message WriteRequest {
+ repeated TimeSeries timeseries = 1 [(nullable) = false];
+}
+
+message TimeSeries {
+ repeated Label labels = 1 [(nullable) = false];
+ repeated Sample samples = 2 [(nullable) = false];
+}
+
+message Label {
+ string name = 1;
+ string value = 2;
+}
+
+message Sample {
+ double value = 1;
+ int64 timestamp = 2;
+}
+
+extend google.protobuf.FieldOptions {
+ bool nullable = 65001;
+}
diff --git a/exporting/prometheus/remote_write/remote_write_request.cc b/exporting/prometheus/remote_write/remote_write_request.cc
new file mode 100644
index 000000000..48d19efd6
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write_request.cc
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <snappy.h>
+#include "remote_write.pb.h"
+#include "remote_write_request.h"
+
+using namespace prometheus;
+
+google::protobuf::Arena arena;
+
+/**
+ * Initialize a write request
+ *
+ * @return Returns a new write request
+ */
+void *init_write_request()
+{
+ GOOGLE_PROTOBUF_VERIFY_VERSION;
+ WriteRequest *write_request = google::protobuf::Arena::CreateMessage<WriteRequest>(&arena);
+ return (void *)write_request;
+}
+
+/**
+ * Adds information about a host to a write request
+ *
+ * @param write_request_p the write request
+ * @param name the name of a metric which is used for providing the host information
+ * @param instance the name of the host itself
+ * @param application the name of a program which sends the information
+ * @param version the version of the program
+ * @param timestamp the timestamp for the metric in milliseconds
+ */
+void add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ TimeSeries *timeseries;
+ Sample *sample;
+ Label *label;
+
+ timeseries = write_request->add_timeseries();
+
+ label = timeseries->add_labels();
+ label->set_name("__name__");
+ label->set_value(name);
+
+ label = timeseries->add_labels();
+ label->set_name("instance");
+ label->set_value(instance);
+
+ if (application) {
+ label = timeseries->add_labels();
+ label->set_name("application");
+ label->set_value(application);
+ }
+
+ if (version) {
+ label = timeseries->add_labels();
+ label->set_name("version");
+ label->set_value(version);
+ }
+
+ sample = timeseries->add_samples();
+ sample->set_value(1);
+ sample->set_timestamp(timestamp);
+}
+
+/**
+ * Adds a label to the last created timeseries
+ *
+ * @param write_request_p the write request with the timeseries
+ * @param key the key of the label
+ * @param value the value of the label
+ */
+void add_label(void *write_request_p, char *key, char *value)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ TimeSeries *timeseries;
+ Label *label;
+
+ timeseries = write_request->mutable_timeseries(write_request->timeseries_size() - 1);
+
+ label = timeseries->add_labels();
+ label->set_name(key);
+ label->set_value(value);
+}
+
+/**
+ * Adds a metric to a write request
+ *
+ * @param write_request_p the write request
+ * @param name the name of the metric
+ * @param chart the chart the metric belongs to
+ * @param family the family the metric belongs to
+ * @param dimension the dimension the metric belongs to
+ * @param instance the name of the host the metric belongs to
+ * @param value the value of the metric
+ * @param timestamp the timestamp for the metric in milliseconds
+ */
+void add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension, const char *instance,
+ const double value, const int64_t timestamp)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ TimeSeries *timeseries;
+ Sample *sample;
+ Label *label;
+
+ timeseries = write_request->add_timeseries();
+
+ label = timeseries->add_labels();
+ label->set_name("__name__");
+ label->set_value(name);
+
+ label = timeseries->add_labels();
+ label->set_name("chart");
+ label->set_value(chart);
+
+ label = timeseries->add_labels();
+ label->set_name("family");
+ label->set_value(family);
+
+ if (dimension) {
+ label = timeseries->add_labels();
+ label->set_name("dimension");
+ label->set_value(dimension);
+ }
+
+ label = timeseries->add_labels();
+ label->set_name("instance");
+ label->set_value(instance);
+
+ sample = timeseries->add_samples();
+ sample->set_value(value);
+ sample->set_timestamp(timestamp);
+}
+
+/**
+ * Gets the size of a write request
+ *
+ * @param write_request_p the write request
+ * @return Returns the size of the write request
+ */
+size_t get_write_request_size(void *write_request_p)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+
+#if GOOGLE_PROTOBUF_VERSION < 3001000
+ size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSize());
+#else
+ size_t size = (size_t)snappy::MaxCompressedLength(write_request->ByteSizeLong());
+#endif
+
+ return (size < INT_MAX) ? size : 0;
+}
+
+/**
+ * Packs a write request into a buffer and clears the request
+ *
+ * @param write_request_p the write request
+ * @param buffer a buffer, where compressed data is written
+ * @param size gets the size of the write request, returns the size of the compressed data
+ * @return Returns 0 on success, 1 on failure
+ */
+int pack_and_clear_write_request(void *write_request_p, char *buffer, size_t *size)
+{
+ WriteRequest *write_request = (WriteRequest *)write_request_p;
+ std::string uncompressed_write_request;
+
+ if (write_request->SerializeToString(&uncompressed_write_request) == false)
+ return 1;
+ write_request->clear_timeseries();
+ snappy::RawCompress(uncompressed_write_request.data(), uncompressed_write_request.size(), buffer, size);
+
+ return 0;
+}
+
+/**
+ * Shuts down the Protobuf library
+ */
+void protocol_buffers_shutdown()
+{
+ google::protobuf::ShutdownProtobufLibrary();
+}
diff --git a/exporting/prometheus/remote_write/remote_write_request.h b/exporting/prometheus/remote_write/remote_write_request.h
new file mode 100644
index 000000000..e1dfacaf8
--- /dev/null
+++ b/exporting/prometheus/remote_write/remote_write_request.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_REQUEST_H
+#define NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_REQUEST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *init_write_request();
+
+void add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
+
+void add_label(void *write_request_p, char *key, char *value);
+
+void add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp);
+
+size_t get_write_request_size(void *write_request_p);
+
+int pack_and_clear_write_request(void *write_request_p, char *buffer, size_t *size);
+
+void protocol_buffers_shutdown();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_EXPORTING_PROMETHEUS_REMOTE_WRITE_REQUEST_H
diff --git a/exporting/pubsub/Makefile.am b/exporting/pubsub/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/exporting/pubsub/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/exporting/pubsub/README.md b/exporting/pubsub/README.md
new file mode 100644
index 000000000..6da14c44f
--- /dev/null
+++ b/exporting/pubsub/README.md
@@ -0,0 +1,45 @@
+<!--
+title: "Export metrics to Google Cloud Pub/Sub Service"
+description: "Export Netdata metrics to the Google Cloud Pub/Sub Service for long-term archiving or analytical processing."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/pubsub/README.md
+sidebar_label: Google Cloud Pub/Sub Service
+-->
+
+# Export metrics to Google Cloud Pub/Sub Service
+
+## Prerequisites
+
+To use the Pub/Sub service for metric collection and processing, first
+[install](https://github.com/googleapis/cpp-cmakefiles) the Google Cloud Platform C++ Proto Libraries.
+Pub/Sub support also depends on those libraries' own dependencies, such as `protobuf`, `protoc`, and `grpc`. Next,
+reinstall Netdata from source. The installer will detect that the required libraries are now available.
+
+> You [cannot compile Netdata](https://github.com/netdata/netdata/issues/10193) with Pub/Sub support enabled using
+> `grpc` 1.32 or higher.
+>
+> Some distributions don't have `.cmake` files in packages. To build the C++ Proto Libraries on such distributions we
+> advise you to delete `protobuf`, `protoc`, and `grpc` related packages and
+> [install](https://github.com/grpc/grpc/blob/master/BUILDING.md) `grpc` with its dependencies from source.
+
+## Configuration
+
+To enable exporting to the Pub/Sub service, run `./edit-config exporting.conf` in the Netdata configuration directory
+and set the following options:
+
+```conf
+[pubsub:my_instance]
+ enabled = yes
+ destination = pubsub.googleapis.com
+ credentials file = /etc/netdata/google_cloud_credentials.json
+ project id = my_project
+ topic id = my_topic
+```
+
+Set the `destination` option to a Pub/Sub service endpoint. `pubsub.googleapis.com` is the default one.
+
+Next, create the credentials JSON file by following Google Cloud's [authentication guide](https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account). The user running the Agent
+(typically `netdata`) needs read access to `google_cloud_credentials.json`, which you can set with
+`chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`. Set the `credentials file`
+option to the full path of the file.
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fexporting%2Fpubsub%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/exporting/pubsub/pubsub.c b/exporting/pubsub/pubsub.c
new file mode 100644
index 000000000..336a096ab
--- /dev/null
+++ b/exporting/pubsub/pubsub.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "pubsub.h"
+
+/**
+ * Initialize Pub/Sub connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_pubsub_instance(struct instance *instance)
+{
+ instance->worker = pubsub_connector_worker;
+
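+    // The Pub/Sub connector reuses the JSON plaintext formatters set below; the
+    // worker then publishes each formatted buffer as a Pub/Sub message.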
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = NULL;
+
+ instance->prepare_header = NULL;
+ instance->check_response = NULL;
+
+ instance->buffer = (void *)buffer_create(0);
+ if (!instance->buffer) {
+ error("EXPORTING: cannot create buffer for Pub/Sub exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+ uv_mutex_init(&instance->mutex);
+ uv_cond_init(&instance->cond_var);
+
+ struct pubsub_specific_data *connector_specific_data = callocz(1, sizeof(struct pubsub_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ struct pubsub_specific_config *connector_specific_config =
+ (struct pubsub_specific_config *)instance->config.connector_specific_config;
+ char error_message[ERROR_LINE_MAX + 1] = "";
+ if (pubsub_init(
+ (void *)connector_specific_data, error_message, instance->config.destination,
+ connector_specific_config->credentials_file, connector_specific_config->project_id,
+ connector_specific_config->topic_id)) {
+ error(
+ "EXPORTING: Cannot initialize a Pub/Sub publisher for instance %s: %s",
+ instance->config.name, error_message);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Clean a Pub/Sub connector instance
+ *
+ * @param instance an instance data structure.
+ */
+void clean_pubsub_instance(struct instance *instance)
+{
+ info("EXPORTING: cleaning up instance %s ...", instance->config.name);
+
+ struct pubsub_specific_data *connector_specific_data =
+ (struct pubsub_specific_data *)instance->connector_specific_data;
+ pubsub_cleanup(connector_specific_data);
+ freez(connector_specific_data);
+
+ buffer_free(instance->buffer);
+
+ struct pubsub_specific_config *connector_specific_config =
+ (struct pubsub_specific_config *)instance->config.connector_specific_config;
+ freez(connector_specific_config->credentials_file);
+ freez(connector_specific_config->project_id);
+ freez(connector_specific_config->topic_id);
+ freez(connector_specific_config);
+
+ info("EXPORTING: instance %s exited", instance->config.name);
+ instance->exited = 1;
+
+ return;
+}
+
+/**
+ * Pub/Sub connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void pubsub_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance *)instance_p;
+ struct pubsub_specific_config *connector_specific_config = instance->config.connector_specific_config;
+ struct pubsub_specific_data *connector_specific_data = instance->connector_specific_data;
+
+ while (!instance->engine->exit) {
+ struct stats *stats = &instance->stats;
+ char error_message[ERROR_LINE_MAX + 1] = "";
+
+ uv_mutex_lock(&instance->mutex);
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // reset the monitoring chart counters
+ stats->received_bytes =
+ stats->sent_bytes =
+ stats->sent_metrics =
+ stats->lost_metrics =
+ stats->receptions =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->data_lost_events =
+ stats->lost_bytes =
+ stats->reconnects = 0;
+
+ BUFFER *buffer = (BUFFER *)instance->buffer;
+ size_t buffer_len = buffer_strlen(buffer);
+
+ stats->buffered_bytes = buffer_len;
+
+ if (pubsub_add_message(instance->connector_specific_data, (char *)buffer_tostring(buffer))) {
+ error("EXPORTING: Instance %s: Cannot add data to a message", instance->config.name);
+
+ stats->data_lost_events++;
+ stats->lost_metrics += stats->buffered_metrics;
+ stats->lost_bytes += buffer_len;
+
+ goto cleanup;
+ }
+
+ debug(
+ D_BACKEND, "EXPORTING: pubsub_publish(): project = %s, topic = %s, buffer = %zu",
+ connector_specific_config->project_id, connector_specific_config->topic_id, buffer_len);
+
+ if (pubsub_publish((void *)connector_specific_data, error_message, stats->buffered_metrics, buffer_len)) {
+ error("EXPORTING: Instance: %s: Cannot publish a message: %s", instance->config.name, error_message);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_metrics += stats->buffered_metrics;
+ stats->lost_bytes += buffer_len;
+
+ goto cleanup;
+ }
+
+ stats->sent_bytes = buffer_len;
+ stats->transmission_successes++;
+
+ size_t sent_metrics = 0, lost_metrics = 0, sent_bytes = 0, lost_bytes = 0;
+
+ if (unlikely(pubsub_get_result(
+ connector_specific_data, error_message, &sent_metrics, &sent_bytes, &lost_metrics, &lost_bytes))) {
+ // oops! we couldn't send (all or some of the) data
+ error("EXPORTING: %s", error_message);
+ error(
+ "EXPORTING: failed to write data to service '%s'. Willing to write %zu bytes, wrote %zu bytes.",
+ instance->config.destination, lost_bytes, sent_bytes);
+
+ stats->transmission_failures++;
+ stats->data_lost_events++;
+ stats->lost_metrics += lost_metrics;
+ stats->lost_bytes += lost_bytes;
+ } else {
+ stats->receptions++;
+ stats->sent_metrics = sent_metrics;
+ }
+
+ cleanup:
+ send_internal_metrics(instance);
+
+ buffer_flush(buffer);
+ stats->buffered_metrics = 0;
+
+ uv_mutex_unlock(&instance->mutex);
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+ clean_pubsub_instance(instance);
+}
diff --git a/exporting/pubsub/pubsub.h b/exporting/pubsub/pubsub.h
new file mode 100644
index 000000000..0bcb76f9b
--- /dev/null
+++ b/exporting/pubsub/pubsub.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PUBSUB_H
+#define NETDATA_EXPORTING_PUBSUB_H
+
+#include "exporting/exporting_engine.h"
+#include "exporting/json/json.h"
+#include "pubsub_publish.h"
+
+int init_pubsub_instance(struct instance *instance);
+void clean_pubsub_instance(struct instance *instance);
+void pubsub_connector_worker(void *instance_p);
+
+#endif //NETDATA_EXPORTING_PUBSUB_H
diff --git a/exporting/pubsub/pubsub_publish.cc b/exporting/pubsub/pubsub_publish.cc
new file mode 100644
index 000000000..dc237cf22
--- /dev/null
+++ b/exporting/pubsub/pubsub_publish.cc
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <google/pubsub/v1/pubsub.grpc.pb.h>
+#include <grpcpp/grpcpp.h>
+#include <cstring>
+#include <stdexcept>
+#include "pubsub_publish.h"
+
+#define EVENT_CHECK_TIMEOUT 50
+
+struct response {
+ grpc::ClientContext *context;
+ google::pubsub::v1::PublishResponse *publish_response;
+ size_t tag;
+ grpc::Status *status;
+
+ size_t published_metrics;
+ size_t published_bytes;
+};
+
+static inline void copy_error_message(char *error_message_dst, const char *error_message_src)
+{
+ std::strncpy(error_message_dst, error_message_src, ERROR_LINE_MAX);
+ error_message_dst[ERROR_LINE_MAX] = '\0';
+}
+
+/**
+ * Initialize a Pub/Sub client and a data structure for responses.
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with instance-wide data.
+ * @param error_message report error message to a caller.
+ * @param destination a Pub/Sub service endpoint.
+ * @param credentials_file a full path for a file with google application credentials.
+ * @param project_id a project ID.
+ * @param topic_id a topic ID.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ try {
+ setenv("GOOGLE_APPLICATION_CREDENTIALS", credentials_file, 0);
+
+ std::shared_ptr<grpc::ChannelCredentials> credentials = grpc::GoogleDefaultCredentials();
+ if (credentials == nullptr) {
+ copy_error_message(error_message, "Can't load credentials");
+ return 1;
+ }
+
+ std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(destination, credentials);
+
+ google::pubsub::v1::Publisher::Stub *stub = new google::pubsub::v1::Publisher::Stub(channel);
+ if (!stub) {
+ copy_error_message(error_message, "Can't create a publisher stub");
+ return 1;
+ }
+
+ connector_specific_data->stub = stub;
+
+ google::pubsub::v1::PublishRequest *request = new google::pubsub::v1::PublishRequest;
+ connector_specific_data->request = request;
+ ((google::pubsub::v1::PublishRequest *)(connector_specific_data->request))
+ ->set_topic(std::string("projects/") + project_id + "/topics/" + topic_id);
+
+ grpc::CompletionQueue *cq = new grpc::CompletionQueue;
+ connector_specific_data->completion_queue = cq;
+
+ connector_specific_data->responses = new std::list<struct response>;
+
+ return 0;
+ } catch (std::exception const &ex) {
+ std::string em(std::string("Standard exception raised: ") + ex.what());
+ copy_error_message(error_message, em.c_str());
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Clean the Pub/Sub connector instance-specific data
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with instance-wide data.
+ */
+void pubsub_cleanup(void *pubsub_specific_data_p)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ std::list<struct response> *responses = (std::list<struct response> *)connector_specific_data->responses;
+ std::list<struct response>::iterator response;
+ for (response = responses->begin(); response != responses->end(); ++response) {
+        // TODO: If we do this, a huge number of records may be lost. We need to find the right way of
+        // cleaning up contexts.
+ // delete response->context;
+ delete response->publish_response;
+ delete response->status;
+ }
+ delete responses;
+
+ ((grpc::CompletionQueue *)connector_specific_data->completion_queue)->Shutdown();
+ delete (grpc::CompletionQueue *)connector_specific_data->completion_queue;
+ delete (google::pubsub::v1::PublishRequest *)connector_specific_data->request;
+ delete (google::pubsub::v1::Publisher::Stub *)connector_specific_data->stub;
+
+    // TODO: Find out how to shut down grpc gracefully. grpc_shutdown() doesn't seem to work.
+ // grpc_shutdown();
+
+ return;
+}
+
+/**
+ * Add data to a Pub/Sub request message.
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with instance-wide data.
+ * @param data a text buffer with metrics.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int pubsub_add_message(void *pubsub_specific_data_p, char *data)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ try {
+ google::pubsub::v1::PubsubMessage *message =
+ ((google::pubsub::v1::PublishRequest *)(connector_specific_data->request))->add_messages();
+ if (!message)
+ return 1;
+
+ message->set_data(data);
+ } catch (std::exception const &ex) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Send data to the Pub/Sub service
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with client and request outcome information.
+ * @param error_message report error message to a caller.
+ * @param buffered_metrics the number of metrics we are going to send.
+ * @param buffered_bytes the number of bytes we are going to send.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int pubsub_publish(void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+
+ try {
+ grpc::ClientContext *context = new grpc::ClientContext;
+
+ std::unique_ptr<grpc::ClientAsyncResponseReader<google::pubsub::v1::PublishResponse> > rpc(
+ ((google::pubsub::v1::Publisher::Stub *)(connector_specific_data->stub))
+ ->AsyncPublish(
+ context, (*(google::pubsub::v1::PublishRequest *)(connector_specific_data->request)),
+ ((grpc::CompletionQueue *)(connector_specific_data->completion_queue))));
+
+ struct response response;
+ response.context = context;
+ response.publish_response = new google::pubsub::v1::PublishResponse;
+ response.tag = connector_specific_data->last_tag++;
+ response.status = new grpc::Status;
+ response.published_metrics = buffered_metrics;
+ response.published_bytes = buffered_bytes;
+
+ rpc->Finish(response.publish_response, response.status, (void *)response.tag);
+
+ ((google::pubsub::v1::PublishRequest *)(connector_specific_data->request))->clear_messages();
+
+ ((std::list<struct response> *)(connector_specific_data->responses))->push_back(response);
+ } catch (std::exception const &ex) {
+ std::string em(std::string("Standard exception raised: ") + ex.what());
+ copy_error_message(error_message, em.c_str());
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Get results from service responses
+ *
+ * @param pubsub_specific_data_p a pointer to a structure with instance-wide data.
+ * @param error_message report error message to a caller.
+ * @param sent_metrics report to a caller how many metrics were successfully sent.
+ * @param sent_bytes report to a caller how many bytes were successfully sent.
+ * @param lost_metrics report to a caller how many metrics were lost during transmission.
+ * @param lost_bytes report to a caller how many bytes were lost during transmission.
+ * @return Returns 0 if all data was sent successfully, 1 when data was lost on transmission.
+ */
+int pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes)
+{
+ struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
+ std::list<struct response> *responses = (std::list<struct response> *)connector_specific_data->responses;
+ grpc_impl::CompletionQueue::NextStatus next_status;
+
+ *sent_metrics = 0;
+ *sent_bytes = 0;
+ *lost_metrics = 0;
+ *lost_bytes = 0;
+
+ try {
+ do {
+ std::list<struct response>::iterator response;
+ void *got_tag;
+ bool ok = false;
+
+ auto deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(50);
+ next_status = (*(grpc::CompletionQueue *)(connector_specific_data->completion_queue))
+ .AsyncNext(&got_tag, &ok, deadline);
+
+ if (next_status == grpc::CompletionQueue::GOT_EVENT) {
+ for (response = responses->begin(); response != responses->end(); ++response) {
+ if ((void *)response->tag == got_tag)
+ break;
+ }
+
+ if (response == responses->end()) {
+ copy_error_message(error_message, "Cannot get Pub/Sub response");
+ return 1;
+ }
+
+ if (ok && response->publish_response->message_ids_size()) {
+ *sent_metrics += response->published_metrics;
+ *sent_bytes += response->published_bytes;
+ } else {
+ *lost_metrics += response->published_metrics;
+ *lost_bytes += response->published_bytes;
+ response->status->error_message().copy(error_message, ERROR_LINE_MAX);
+ error_message[ERROR_LINE_MAX] = '\0';
+ }
+
+ delete response->context;
+ delete response->publish_response;
+ delete response->status;
+ responses->erase(response);
+ }
+
+ if (next_status == grpc::CompletionQueue::SHUTDOWN) {
+ copy_error_message(error_message, "Completion queue shutdown");
+ return 1;
+ }
+
+ } while (next_status == grpc::CompletionQueue::GOT_EVENT);
+ } catch (std::exception const &ex) {
+ std::string em(std::string("Standard exception raised: ") + ex.what());
+ copy_error_message(error_message, em.c_str());
+ return 1;
+ }
+
+ if (*lost_metrics) {
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/exporting/pubsub/pubsub_publish.h b/exporting/pubsub/pubsub_publish.h
new file mode 100644
index 000000000..567a262f0
--- /dev/null
+++ b/exporting/pubsub/pubsub_publish.h
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EXPORTING_PUBSUB_PUBLISH_H
+#define NETDATA_EXPORTING_PUBSUB_PUBLISH_H
+
+#define ERROR_LINE_MAX 1023
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct pubsub_specific_data {
+ void *stub;
+ void *request;
+ void *completion_queue;
+
+ void *responses;
+ size_t last_tag;
+};
+
+int pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id);
+void pubsub_cleanup(void *pubsub_specific_data_p);
+
+int pubsub_add_message(void *pubsub_specific_data_p, char *data);
+
+int pubsub_publish(void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes);
+int pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif //NETDATA_EXPORTING_PUBSUB_PUBLISH_H
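For orientation, this is how the calls above chain together in a hypothetical standalone caller. The project, topic and payload are placeholders, and a real caller polls `pubsub_get_result()` on every cycle (as the worker does), since the asynchronous result may not be ready on the first poll:

```c
#include <stdio.h>
#include <string.h>
#include "pubsub_publish.h"

/* Hypothetical caller: publish one message and poll once for the outcome. */
int publish_once_example(void)
{
    struct pubsub_specific_data data = { 0 };
    char err[ERROR_LINE_MAX + 1] = "";

    if (pubsub_init(&data, err, "pubsub.googleapis.com",
                    "/etc/netdata/google_cloud_credentials.json", "my_project", "my_topic")) {
        fprintf(stderr, "pubsub_init failed: %s\n", err);
        return 1;
    }

    char payload[] = "example metric payload";
    int rc = 1;

    if (!pubsub_add_message(&data, payload) &&
        !pubsub_publish(&data, err, 1, strlen(payload))) {
        size_t sent_metrics = 0, sent_bytes = 0, lost_metrics = 0, lost_bytes = 0;

        /* Completed publishes found on the completion queue are counted here. */
        rc = pubsub_get_result(&data, err, &sent_metrics, &sent_bytes, &lost_metrics, &lost_bytes);
        if (rc)
            fprintf(stderr, "publish failed: %s\n", err);
    }

    pubsub_cleanup(&data);
    return rc;
}
```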
diff --git a/exporting/read_config.c b/exporting/read_config.c
new file mode 100644
index 000000000..995ba578f
--- /dev/null
+++ b/exporting/read_config.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+struct config exporting_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+struct instance *prometheus_exporter_instance = NULL;
+
+static _CONNECTOR_INSTANCE *find_instance(const char *section)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ local_ci = add_connector_instance(NULL, NULL); // Get root section
+ if (unlikely(!local_ci))
+ return local_ci;
+
+ if (!section)
+ return local_ci;
+
+ while (local_ci) {
+ if (!strcmp(local_ci->instance_name, section))
+ break;
+ local_ci = local_ci->next;
+ }
+ return local_ci;
+}
+
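+/**
+ * Get a string option for a connector instance
+ *
+ * Look the option up with the exporting fallback chain: the instance section first,
+ * then its connector section, then the global [exporting] section, and finally the
+ * supplied default value.
+ */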
+char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ if (!strcmp(section, CONFIG_SECTION_EXPORTING))
+ return appconfig_get(root, CONFIG_SECTION_EXPORTING, name, default_value);
+
+ local_ci = find_instance(section);
+
+ if (!local_ci)
+ return NULL; // TODO: Check if it is meaningful to return default_value
+
+ return appconfig_get(
+ root, local_ci->instance_name, name,
+ appconfig_get(
+ root, local_ci->connector_name, name, appconfig_get(root, CONFIG_SECTION_EXPORTING, name, default_value)));
+}
+
+int expconfig_get_boolean(struct config *root, const char *section, const char *name, int default_value)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ if (!strcmp(section, CONFIG_SECTION_EXPORTING))
+ return appconfig_get_boolean(root, CONFIG_SECTION_EXPORTING, name, default_value);
+
+ local_ci = find_instance(section);
+
+ if (!local_ci)
+ return 0; // TODO: Check if it is meaningful to return default_value
+
+ return appconfig_get_boolean(
+ root, local_ci->instance_name, name,
+ appconfig_get_boolean(
+ root, local_ci->connector_name, name,
+ appconfig_get_boolean(root, CONFIG_SECTION_EXPORTING, name, default_value)));
+}
+
+long long expconfig_get_number(struct config *root, const char *section, const char *name, long long default_value)
+{
+ _CONNECTOR_INSTANCE *local_ci;
+
+ if (!strcmp(section, CONFIG_SECTION_EXPORTING))
+ return appconfig_get_number(root, CONFIG_SECTION_EXPORTING, name, default_value);
+
+ local_ci = find_instance(section);
+
+ if (!local_ci)
+ return 0; // TODO: Check if it is meaningful to return default_value
+
+ return appconfig_get_number(
+ root, local_ci->instance_name, name,
+ appconfig_get_number(
+ root, local_ci->connector_name, name,
+ appconfig_get_number(root, CONFIG_SECTION_EXPORTING, name, default_value)));
+}
+
+/*
+ * Get the next connector instance that we need to activate
+ *
+ * @param target_ci will be filled with the instance name and connector name
+ *
+ * @return Returns 1 if there are more connector instances to fetch, 0 when done
+ *
+ */
+
+int get_connector_instance(struct connector_instance *target_ci)
+{
+ static _CONNECTOR_INSTANCE *local_ci = NULL;
+ _CONNECTOR_INSTANCE *global_connector_instance;
+
+ global_connector_instance = find_instance(NULL); // Fetch head of instances
+
+ if (unlikely(!global_connector_instance))
+ return 0;
+
+ if (target_ci == NULL) {
+ local_ci = NULL;
+ return 1;
+ }
+ if (local_ci == NULL)
+ local_ci = global_connector_instance;
+ else {
+ local_ci = local_ci->next;
+ if (local_ci == NULL)
+ return 0;
+ }
+
+ strcpy(target_ci->instance_name, local_ci->instance_name);
+ strcpy(target_ci->connector_name, local_ci->connector_name);
+
+ return 1;
+}
+
+/**
+ * Select Type
+ *
+ * Select the connector type based on the user input
+ *
+ * @param type is the string that defines the connector type
+ *
+ * @return It returns the connector id.
+ */
+EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type)
+{
+ if (!strcmp(type, "graphite") || !strcmp(type, "graphite:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_GRAPHITE;
+ } else if (!strcmp(type, "graphite:http") || !strcmp(type, "graphite:https")) {
+ return EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP;
+ } else if (!strcmp(type, "json") || !strcmp(type, "json:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_JSON;
+ } else if (!strcmp(type, "json:http") || !strcmp(type, "json:https")) {
+ return EXPORTING_CONNECTOR_TYPE_JSON_HTTP;
+ } else if (!strcmp(type, "opentsdb") || !strcmp(type, "opentsdb:telnet")) {
+ return EXPORTING_CONNECTOR_TYPE_OPENTSDB;
+ } else if (!strcmp(type, "opentsdb:http") || !strcmp(type, "opentsdb:https")) {
+ return EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP;
+ } else if (
+ !strcmp(type, "prometheus_remote_write") ||
+ !strcmp(type, "prometheus_remote_write:http") ||
+ !strcmp(type, "prometheus_remote_write:https")) {
+ return EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE;
+ } else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_KINESIS;
+ } else if (!strcmp(type, "pubsub") || !strcmp(type, "pubsub:plaintext")) {
+ return EXPORTING_CONNECTOR_TYPE_PUBSUB;
+ } else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext"))
+ return EXPORTING_CONNECTOR_TYPE_MONGODB;
+
+ return EXPORTING_CONNECTOR_TYPE_UNKNOWN;
+}
+
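+/**
+ * Parse a data source option
+ *
+ * Map the configured data source string ("as collected", "average", "sum" and their
+ * aliases) to the matching source bit, clearing any other source bits.
+ *
+ * @param data_source the configured data source string.
+ * @param exporting_options the current options of an exporting instance.
+ * @return Returns the updated options.
+ */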
+EXPORTING_OPTIONS exporting_parse_data_source(const char *data_source, EXPORTING_OPTIONS exporting_options)
+{
+ if (!strcmp(data_source, "raw") || !strcmp(data_source, "as collected") || !strcmp(data_source, "as-collected") ||
+ !strcmp(data_source, "as_collected") || !strcmp(data_source, "ascollected")) {
+ exporting_options |= EXPORTING_SOURCE_DATA_AS_COLLECTED;
+ exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ } else if (!strcmp(data_source, "average")) {
+ exporting_options |= EXPORTING_SOURCE_DATA_AVERAGE;
+ exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_AVERAGE);
+ } else if (!strcmp(data_source, "sum") || !strcmp(data_source, "volume")) {
+ exporting_options |= EXPORTING_SOURCE_DATA_SUM;
+ exporting_options &= ~(EXPORTING_OPTIONS_SOURCE_BITS ^ EXPORTING_SOURCE_DATA_SUM);
+ } else {
+        error("EXPORTING: invalid data source method '%s'.", data_source);
+ }
+
+ return exporting_options;
+}
+
+/**
+ * Read configuration
+ *
+ * Based on read configuration an engine data structure is filled with exporting connector instances.
+ *
+ * @return Returns a filled engine data structure or NULL if there are no connector instances configured.
+ */
+struct engine *read_exporting_config()
+{
+ int instances_to_activate = 0;
+ int exporting_config_exists = 0;
+
+ static struct engine *engine = NULL;
+ struct connector_instance_list {
+ struct connector_instance local_ci;
+ EXPORTING_CONNECTOR_TYPE backend_type;
+
+ struct connector_instance_list *next;
+ };
+ struct connector_instance local_ci;
+ struct connector_instance_list *tmp_ci_list = NULL, *tmp_ci_list1 = NULL, *tmp_ci_list_prev = NULL;
+
+ if (unlikely(engine))
+ return engine;
+
+ char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, EXPORTING_CONF);
+
+ exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL);
+ if (!exporting_config_exists) {
+ info("CONFIG: cannot load user exporting config '%s'. Will try the stock version.", filename);
+ freez(filename);
+
+ filename = strdupz_path_subpath(netdata_configured_stock_config_dir, EXPORTING_CONF);
+ exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL);
+ if (!exporting_config_exists)
+ info("CONFIG: cannot load stock exporting config '%s'. Running with internal defaults.", filename);
+ }
+
+ freez(filename);
+
+#define prometheus_config_get(name, value) \
+ appconfig_get( \
+ &exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
+ appconfig_get(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+#define prometheus_config_get_number(name, value) \
+ appconfig_get_number( \
+ &exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
+ appconfig_get_number(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+#define prometheus_config_get_boolean(name, value) \
+ appconfig_get_boolean( \
+ &exporting_config, CONFIG_SECTION_PROMETHEUS, name, \
+ appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, name, value))
+
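+    // Set up the global instance used for Prometheus-format output. It reads the
+    // [prometheus] section (falling back to [exporting]) and is kept separate from
+    // the engine's connector instances built below.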
+ if (!prometheus_exporter_instance) {
+ prometheus_exporter_instance = callocz(1, sizeof(struct instance));
+
+ prometheus_exporter_instance->config.update_every =
+ prometheus_config_get_number(EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+
+ // wait for backend subsystem to be initialized
+ for (int retries = 0; !global_backend_source && retries < 1000; retries++)
+ sleep_usec(10000);
+
+ if (!global_backend_source)
+ global_backend_source = "average";
+
+ prometheus_exporter_instance->config.options |= global_backend_options & EXPORTING_OPTIONS_SOURCE_BITS;
+
+ char *data_source = prometheus_config_get("data source", global_backend_source);
+ prometheus_exporter_instance->config.options =
+ exporting_parse_data_source(data_source, prometheus_exporter_instance->config.options);
+
+ if (prometheus_config_get_boolean(
+ "send names instead of ids", global_backend_options & EXPORTING_OPTION_SEND_NAMES))
+ prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_NAMES;
+ else
+ prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
+
+ if (prometheus_config_get_boolean("send configured labels", CONFIG_BOOLEAN_YES))
+ prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ else
+ prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+
+ if (prometheus_config_get_boolean("send automatic labels", CONFIG_BOOLEAN_NO))
+ prometheus_exporter_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+ else
+ prometheus_exporter_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ prometheus_exporter_instance->config.charts_pattern =
+ simple_pattern_create(prometheus_config_get("send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create(
+ prometheus_config_get("send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
+
+ prometheus_exporter_instance->config.prefix = prometheus_config_get("prefix", global_backend_prefix);
+ }
+
+ // TODO: change BACKEND to EXPORTING
+ while (get_connector_instance(&local_ci)) {
+ info("Processing connector instance (%s)", local_ci.instance_name);
+
+ if (exporter_get_boolean(local_ci.instance_name, "enabled", 0)) {
+ info(
+ "Instance (%s) on connector (%s) is enabled and scheduled for activation",
+ local_ci.instance_name, local_ci.connector_name);
+
+ tmp_ci_list = (struct connector_instance_list *)callocz(1, sizeof(struct connector_instance_list));
+ memcpy(&tmp_ci_list->local_ci, &local_ci, sizeof(local_ci));
+ tmp_ci_list->backend_type = exporting_select_type(local_ci.connector_name);
+ tmp_ci_list->next = tmp_ci_list_prev;
+ tmp_ci_list_prev = tmp_ci_list;
+ instances_to_activate++;
+ } else
+ info("Instance (%s) on connector (%s) is not enabled", local_ci.instance_name, local_ci.connector_name);
+ }
+
+ if (unlikely(!instances_to_activate)) {
+ info("No connector instances to activate");
+ return NULL;
+ }
+
+ engine = (struct engine *)callocz(1, sizeof(struct engine));
+ // TODO: Check and fill engine fields if actually needed
+
+ if (exporting_config_exists) {
+ engine->config.hostname =
+ strdupz(exporter_get(CONFIG_SECTION_EXPORTING, "hostname", netdata_configured_hostname));
+ engine->config.update_every = exporter_get_number(
+ CONFIG_SECTION_EXPORTING, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+ }
+
+ while (tmp_ci_list) {
+ struct instance *tmp_instance;
+ char *instance_name;
+ char *default_destination = "localhost";
+
+ info("Instance %s on %s", tmp_ci_list->local_ci.instance_name, tmp_ci_list->local_ci.connector_name);
+
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_UNKNOWN) {
+ error("Unknown exporting connector type");
+ goto next_connector_instance;
+ }
+
+#ifndef ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
+ error("Prometheus Remote Write support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+#ifndef HAVE_KINESIS
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
+ error("AWS Kinesis support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+#ifndef ENABLE_EXPORTING_PUBSUB
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_PUBSUB) {
+ error("Google Cloud Pub/Sub support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+#ifndef HAVE_MONGOC
+ if (tmp_ci_list->backend_type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
+ error("MongoDB support isn't compiled");
+ goto next_connector_instance;
+ }
+#endif
+
+ tmp_instance = (struct instance *)callocz(1, sizeof(struct instance));
+ tmp_instance->next = engine->instance_root;
+ engine->instance_root = tmp_instance;
+
+ tmp_instance->engine = engine;
+ tmp_instance->config.type = tmp_ci_list->backend_type;
+
+ instance_name = tmp_ci_list->local_ci.instance_name;
+
+ tmp_instance->config.type_name = strdupz(tmp_ci_list->local_ci.connector_name);
+ tmp_instance->config.name = strdupz(tmp_ci_list->local_ci.instance_name);
+
+ tmp_instance->config.update_every =
+ exporter_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+
+ tmp_instance->config.buffer_on_failures = exporter_get_number(instance_name, "buffer on failures", 10);
+
+ tmp_instance->config.timeoutms = exporter_get_number(instance_name, "timeout ms", 10000);
+
+ tmp_instance->config.charts_pattern =
+ simple_pattern_create(exporter_get(instance_name, "send charts matching", "*"), NULL, SIMPLE_PATTERN_EXACT);
+
+ tmp_instance->config.hosts_pattern = simple_pattern_create(
+ exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT);
+
+ char *data_source = exporter_get(instance_name, "data source", "average");
+
+ tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options);
+ if (EXPORTING_OPTIONS_DATA_SOURCE(tmp_instance->config.options) != EXPORTING_SOURCE_DATA_AS_COLLECTED &&
+ tmp_instance->config.update_every % localhost->rrd_update_every)
+ info(
+ "The update interval %d for instance %s is not a multiple of the database update interval %d. "
+ "Metric values will deviate at different points in time.",
+ tmp_instance->config.update_every, tmp_instance->config.name, localhost->rrd_update_every);
+
+ if (exporter_get_boolean(instance_name, "send configured labels", CONFIG_BOOLEAN_YES))
+ tmp_instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ else
+ tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+
+ if (exporter_get_boolean(instance_name, "send automatic labels", CONFIG_BOOLEAN_NO))
+ tmp_instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+ else
+ tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ if (exporter_get_boolean(instance_name, "send names instead of ids", CONFIG_BOOLEAN_YES))
+ tmp_instance->config.options |= EXPORTING_OPTION_SEND_NAMES;
+ else
+ tmp_instance->config.options &= ~EXPORTING_OPTION_SEND_NAMES;
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) {
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct prometheus_remote_write_specific_config));
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->remote_write_path =
+ strdupz(exporter_get(instance_name, "remote write URL path", "/receive"));
+ }
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_KINESIS) {
+ struct aws_kinesis_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct aws_kinesis_specific_config));
+
+ default_destination = "us-east-1";
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->stream_name = strdupz(exporter_get(instance_name, "stream name", ""));
+
+ connector_specific_config->auth_key_id = strdupz(exporter_get(instance_name, "aws_access_key_id", ""));
+ connector_specific_config->secure_key = strdupz(exporter_get(instance_name, "aws_secret_access_key", ""));
+ }
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PUBSUB) {
+ struct pubsub_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct pubsub_specific_config));
+
+ default_destination = "pubsub.googleapis.com";
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->credentials_file = strdupz(exporter_get(instance_name, "credentials file", ""));
+ connector_specific_config->project_id = strdupz(exporter_get(instance_name, "project id", ""));
+ connector_specific_config->topic_id = strdupz(exporter_get(instance_name, "topic id", ""));
+ }
+
+ if (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_MONGODB) {
+ struct mongodb_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct mongodb_specific_config));
+
+ tmp_instance->config.connector_specific_config = connector_specific_config;
+
+ connector_specific_config->database = strdupz(exporter_get(
+ instance_name, "database", ""));
+
+ connector_specific_config->collection = strdupz(exporter_get(
+ instance_name, "collection", ""));
+ }
+
+ tmp_instance->config.destination = strdupz(exporter_get(instance_name, "destination", default_destination));
+
+ tmp_instance->config.prefix = strdupz(exporter_get(instance_name, "prefix", "netdata"));
+
+ tmp_instance->config.hostname = strdupz(exporter_get(instance_name, "hostname", engine->config.hostname));
+
+#ifdef ENABLE_HTTPS
+
+#define STR_GRAPHITE_HTTPS "graphite:https"
+#define STR_JSON_HTTPS "json:https"
+#define STR_OPENTSDB_HTTPS "opentsdb:https"
+#define STR_PROMETHEUS_REMOTE_WRITE_HTTPS "prometheus_remote_write:https"
+
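+    // Turn on TLS for this instance when its connector section was configured with
+    // one of the ":https" connector types.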
+ if ((tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP &&
+ !strncmp(tmp_ci_list->local_ci.connector_name, STR_GRAPHITE_HTTPS, strlen(STR_GRAPHITE_HTTPS))) ||
+ (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP &&
+ !strncmp(tmp_ci_list->local_ci.connector_name, STR_JSON_HTTPS, strlen(STR_JSON_HTTPS))) ||
+ (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP &&
+ !strncmp(tmp_ci_list->local_ci.connector_name, STR_OPENTSDB_HTTPS, strlen(STR_OPENTSDB_HTTPS))) ||
+ (tmp_instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE &&
+ !strncmp(
+ tmp_ci_list->local_ci.connector_name, STR_PROMETHEUS_REMOTE_WRITE_HTTPS,
+ strlen(STR_PROMETHEUS_REMOTE_WRITE_HTTPS)))) {
+ tmp_instance->config.options |= EXPORTING_OPTION_USE_TLS;
+ }
+#endif
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info(
+ " Dest=[%s], upd=[%d], buffer=[%d] timeout=[%ld] options=[%u]",
+ tmp_instance->config.destination,
+ tmp_instance->config.update_every,
+ tmp_instance->config.buffer_on_failures,
+ tmp_instance->config.timeoutms,
+ tmp_instance->config.options);
+#endif
+
+ if (unlikely(!exporting_config_exists) && !engine->config.hostname) {
+ engine->config.hostname = strdupz(config_get(instance_name, "hostname", netdata_configured_hostname));
+ engine->config.update_every =
+ config_get_number(instance_name, EXPORTING_UPDATE_EVERY_OPTION_NAME, EXPORTING_UPDATE_EVERY_DEFAULT);
+ }
+
+ next_connector_instance:
+ tmp_ci_list1 = tmp_ci_list->next;
+ freez(tmp_ci_list);
+ tmp_ci_list = tmp_ci_list1;
+ }
+
+ return engine;
+}
diff --git a/exporting/send_data.c b/exporting/send_data.c
new file mode 100644
index 000000000..1e932e98f
--- /dev/null
+++ b/exporting/send_data.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Check if TLS is enabled in the configuration
+ *
+ * @param type the connector type.
+ * @param options the instance options.
+ * @return Returns 1 if TLS should be enabled, 0 otherwise.
+ */
+static int exporting_tls_is_enabled(EXPORTING_CONNECTOR_TYPE type, EXPORTING_OPTIONS options)
+{
+ return (type == EXPORTING_CONNECTOR_TYPE_GRAPHITE_HTTP ||
+ type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP ||
+ type == EXPORTING_CONNECTOR_TYPE_OPENTSDB_HTTP ||
+ type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) &&
+ options & EXPORTING_OPTION_USE_TLS;
+}
+
+/**
+ * Discard response
+ *
+ * Discards a response received by an exporting connector instance after logging a sample of it to error.log
+ *
+ * @param buffer buffer with response data.
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int exporting_discard_response(BUFFER *buffer, struct instance *instance) {
+#if NETDATA_INTERNAL_CHECKS
+ char sample[1024];
+ const char *s = buffer_tostring(buffer);
+ char *d = sample, *e = &sample[sizeof(sample) - 1];
+
+ for(; *s && d < e ;s++) {
+ char c = *s;
+ if(unlikely(!isprint(c))) c = ' ';
+ *d++ = c;
+ }
+ *d = '\0';
+
+ debug(
+ D_BACKEND,
+ "EXPORTING: received %zu bytes from %s connector instance. Ignoring them. Sample: '%s'",
+ buffer_strlen(buffer),
+ instance->config.name,
+ sample);
+#else
+ UNUSED(instance);
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ buffer_flush(buffer);
+ return 0;
+}
+
+/**
+ * Receive response
+ *
+ * @param sock communication socket.
+ * @param instance an instance data structure.
+ */
+void simple_connector_receive_response(int *sock, struct instance *instance)
+{
+ static BUFFER *response = NULL;
+ if (!response)
+ response = buffer_create(4096);
+
+ struct stats *stats = &instance->stats;
+#ifdef ENABLE_HTTPS
+ uint32_t options = (uint32_t)instance->config.options;
+ struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+
+ if (options & EXPORTING_OPTION_USE_TLS)
+ ERR_clear_error();
+#endif
+
+ errno = 0;
+
+ // loop through to collect all data
+ while (*sock != -1 && errno != EWOULDBLOCK) {
+ ssize_t r;
+#ifdef ENABLE_HTTPS
+ if (exporting_tls_is_enabled(instance->config.type, options) &&
+ connector_specific_data->conn &&
+ connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
+ r = (ssize_t)SSL_read(connector_specific_data->conn,
+ &response->buffer[response->len],
+ (int) (response->size - response->len));
+
+ if (likely(r > 0)) {
+ // we received some data
+ response->len += r;
+ stats->received_bytes += r;
+ stats->receptions++;
+ continue;
+ } else {
+ int sslerrno = SSL_get_error(connector_specific_data->conn, (int) r);
+ u_long sslerr = ERR_get_error();
+ char buf[256];
+ switch (sslerrno) {
+ case SSL_ERROR_WANT_READ:
+ case SSL_ERROR_WANT_WRITE:
+ goto endloop;
+ default:
+ ERR_error_string_n(sslerr, buf, sizeof(buf));
+ error("SSL error (%s)",
+ ERR_error_string((long)SSL_get_error(connector_specific_data->conn, (int)r), NULL));
+ goto endloop;
+ }
+ }
+ } else {
+ r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
+ }
+#else
+ r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
+#endif
+ if (likely(r > 0)) {
+ // we received some data
+ response->len += r;
+ stats->received_bytes += r;
+ stats->receptions++;
+ } else if (r == 0) {
+ error("EXPORTING: '%s' closed the socket", instance->config.destination);
+ close(*sock);
+ *sock = -1;
+ } else {
+ // failed to receive data
+ if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ error("EXPORTING: cannot receive data from '%s'.", instance->config.destination);
+ }
+ }
+
+#ifdef UNIT_TESTING
+ break;
+#endif
+ }
+#ifdef ENABLE_HTTPS
+endloop:
+#endif
+
+ // if we received data, process them
+ if (buffer_strlen(response))
+ instance->check_response(response, instance);
+}
+
+/**
+ * Send buffer to a server
+ *
+ * @param sock communication socket.
+ * @param failures the number of communication failures.
+ * @param instance an instance data structure.
+ * @param header a buffer with HTTP headers to send before the data, if any.
+ * @param buffer a buffer with formatted metrics to send.
+ * @param buffered_metrics the number of metrics in the buffer.
+ */
+void simple_connector_send_buffer(
+ int *sock, int *failures, struct instance *instance, BUFFER *header, BUFFER *buffer, size_t buffered_metrics)
+{
+ int flags = 0;
+#ifdef MSG_NOSIGNAL
+ flags += MSG_NOSIGNAL;
+#endif
+
+#ifdef ENABLE_HTTPS
+ uint32_t options = (uint32_t)instance->config.options;
+ struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+
+ if (options & EXPORTING_OPTION_USE_TLS)
+ ERR_clear_error();
+#endif
+
+ struct stats *stats = &instance->stats;
+ ssize_t header_sent_bytes = 0;
+ ssize_t buffer_sent_bytes = 0;
+ size_t header_len = buffer_strlen(header);
+ size_t buffer_len = buffer_strlen(buffer);
+
+#ifdef ENABLE_HTTPS
+ if (exporting_tls_is_enabled(instance->config.type, options) &&
+ connector_specific_data->conn &&
+ connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
+ if (header_len)
+ header_sent_bytes = (ssize_t)SSL_write(connector_specific_data->conn, buffer_tostring(header), header_len);
+ if ((size_t)header_sent_bytes == header_len)
+ buffer_sent_bytes = (ssize_t)SSL_write(connector_specific_data->conn, buffer_tostring(buffer), buffer_len);
+ } else {
+ if (header_len)
+ header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags);
+ if ((size_t)header_sent_bytes == header_len)
+ buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags);
+ }
+#else
+ if (header_len)
+ header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags);
+ if ((size_t)header_sent_bytes == header_len)
+ buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags);
+#endif
+
+ if ((size_t)buffer_sent_bytes == buffer_len) {
+ // we sent the data successfully
+ stats->transmission_successes++;
+ stats->sent_metrics += buffered_metrics;
+ stats->sent_bytes += buffer_sent_bytes;
+
+ // reset the failures count
+ *failures = 0;
+
+ // empty the buffer
+ buffer_flush(buffer);
+ } else {
+ // oops! we couldn't send (all or some of the) data
+ error(
+ "EXPORTING: failed to write data to '%s'. Willing to write %zu bytes, wrote %zd bytes. Will re-connect.",
+ instance->config.destination,
+ buffer_len,
+ buffer_sent_bytes);
+ stats->transmission_failures++;
+
+ if(buffer_sent_bytes != -1)
+ stats->sent_bytes += buffer_sent_bytes;
+
+ // increment the counter we check for data loss
+ (*failures)++;
+
+ // close the socket - we will re-open it next time
+ close(*sock);
+ *sock = -1;
+ }
+}
+
+/**
+ * Simple connector worker
+ *
+ * Runs in a separate thread for every instance.
+ *
+ * @param instance_p an instance data structure.
+ */
+void simple_connector_worker(void *instance_p)
+{
+ struct instance *instance = (struct instance*)instance_p;
+ struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ uint32_t options = (uint32_t)instance->config.options;
+
+ if (options & EXPORTING_OPTION_USE_TLS)
+ ERR_clear_error();
+#endif
+ struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config;
+
+ int sock = -1;
+ struct timeval timeout = { .tv_sec = (instance->config.timeoutms * 1000) / 1000000,
+ .tv_usec = (instance->config.timeoutms * 1000) % 1000000 };
+ int failures = 0;
+
+ while (!instance->engine->exit) {
+ struct stats *stats = &instance->stats;
+ int send_stats = 0;
+
+ if (instance->data_is_ready)
+ send_stats = 1;
+
+ uv_mutex_lock(&instance->mutex);
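+        // Wait for new data only when nothing is queued yet or the previous
+        // transmission failed; otherwise keep draining the buffers already queued.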
+ if (!connector_specific_data->first_buffer->used || failures) {
+ while (!instance->data_is_ready)
+ uv_cond_wait(&instance->cond_var, &instance->mutex);
+ instance->data_is_ready = 0;
+ send_stats = 1;
+ }
+
+ if (unlikely(instance->engine->exit)) {
+ uv_mutex_unlock(&instance->mutex);
+ break;
+ }
+
+ // ------------------------------------------------------------------------
+ // detach buffer
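+        // Swap the first queued header/buffer pair into this worker's local pair,
+        // so new data can keep being buffered while the detached pair is sent below.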
+
+ size_t buffered_metrics;
+
+ if (!connector_specific_data->previous_buffer ||
+ (connector_specific_data->previous_buffer == connector_specific_data->first_buffer &&
+ connector_specific_data->first_buffer->used == 1)) {
+ BUFFER *header, *buffer;
+
+ header = connector_specific_data->first_buffer->header;
+ buffer = connector_specific_data->first_buffer->buffer;
+ connector_specific_data->buffered_metrics = connector_specific_data->first_buffer->buffered_metrics;
+ connector_specific_data->buffered_bytes = connector_specific_data->first_buffer->buffered_bytes;
+
+ buffered_metrics = connector_specific_data->buffered_metrics;
+
+ buffer_flush(connector_specific_data->header);
+ connector_specific_data->first_buffer->header = connector_specific_data->header;
+ connector_specific_data->header = header;
+
+ buffer_flush(connector_specific_data->buffer);
+ connector_specific_data->first_buffer->buffer = connector_specific_data->buffer;
+ connector_specific_data->buffer = buffer;
+ } else {
+ buffered_metrics = connector_specific_data->buffered_metrics;
+ }
+
+ uv_mutex_unlock(&instance->mutex);
+
+ // ------------------------------------------------------------------------
+ // if we are connected, receive a response, without blocking
+
+ if (likely(sock != -1))
+ simple_connector_receive_response(&sock, instance);
+
+ // ------------------------------------------------------------------------
+ // if we are not connected, connect to a data collecting server
+
+ if (unlikely(sock == -1)) {
+ size_t reconnects = 0;
+
+ sock = connect_to_one_of(
+ instance->config.destination, connector_specific_config->default_port, &timeout, &reconnects, NULL, 0);
+#ifdef ENABLE_HTTPS
+ if (exporting_tls_is_enabled(instance->config.type, options) && sock != -1) {
+ if (netdata_exporting_ctx) {
+ if (sock_delnonblock(sock) < 0)
+ error("Exporting cannot remove the non-blocking flag from socket %d", sock);
+
+ if (connector_specific_data->conn == NULL) {
+ connector_specific_data->conn = SSL_new(netdata_exporting_ctx);
+ if (connector_specific_data->conn == NULL) {
+ error("Failed to allocate SSL structure to socket %d.", sock);
+ connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
+ }
+ } else {
+ SSL_clear(connector_specific_data->conn);
+ }
+
+ if (connector_specific_data->conn) {
+ if (SSL_set_fd(connector_specific_data->conn, sock) != 1) {
+ error("Failed to set the socket to the SSL on socket fd %d.", sock);
+ connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
+ } else {
+ connector_specific_data->flags = NETDATA_SSL_HANDSHAKE_COMPLETE;
+ SSL_set_connect_state(connector_specific_data->conn);
+ int err = SSL_connect(connector_specific_data->conn);
+ if (err != 1) {
+ err = SSL_get_error(connector_specific_data->conn, err);
+ error(
+ "SSL cannot connect with the server: %s ",
+ ERR_error_string((long)SSL_get_error(connector_specific_data->conn, err), NULL));
+ connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
+ } else {
+                                info("Exporting established an SSL connection.");
+
+ struct timeval tv;
+ tv.tv_sec = timeout.tv_sec / 4;
+ tv.tv_usec = 0;
+
+ if (!tv.tv_sec)
+ tv.tv_sec = 2;
+
+ if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv, sizeof(tv)))
+ error("Cannot set timeout to socket %d, this can block communication", sock);
+ }
+ }
+ }
+ }
+ }
+#endif
+
+ stats->reconnects += reconnects;
+ }
+
+ if (unlikely(instance->engine->exit))
+ break;
+
+ // ------------------------------------------------------------------------
+ // if we are connected, send our buffer to the data collecting server
+
+ failures = 0;
+
+ if (likely(sock != -1)) {
+ simple_connector_send_buffer(
+ &sock,
+ &failures,
+ instance,
+ connector_specific_data->header,
+ connector_specific_data->buffer,
+ buffered_metrics);
+ } else {
+ error("EXPORTING: failed to update '%s'", instance->config.destination);
+ stats->transmission_failures++;
+
+ // increment the counter we check for data loss
+ failures++;
+ }
+
+ if (!failures) {
+ connector_specific_data->first_buffer->buffered_metrics =
+ connector_specific_data->first_buffer->buffered_bytes = connector_specific_data->first_buffer->used = 0;
+ connector_specific_data->first_buffer = connector_specific_data->first_buffer->next;
+ }
+
+ if (unlikely(instance->engine->exit))
+ break;
+
+ if (send_stats) {
+ uv_mutex_lock(&instance->mutex);
+
+ stats->buffered_metrics = connector_specific_data->total_buffered_metrics;
+
+ send_internal_metrics(instance);
+
+ stats->buffered_metrics = 0;
+
+ // reset the internal monitoring chart counters
+ connector_specific_data->total_buffered_metrics =
+ stats->buffered_bytes =
+ stats->receptions =
+ stats->received_bytes =
+ stats->sent_metrics =
+ stats->sent_bytes =
+ stats->transmission_successes =
+ stats->transmission_failures =
+ stats->reconnects =
+ stats->data_lost_events =
+ stats->lost_metrics =
+ stats->lost_bytes = 0;
+
+ uv_mutex_unlock(&instance->mutex);
+ }
+
+#ifdef UNIT_TESTING
+ return;
+#endif
+ }
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE)
+ clean_prometheus_remote_write(instance);
+#endif
+
+ simple_connector_cleanup(instance);
+}
diff --git a/exporting/send_internal_metrics.c b/exporting/send_internal_metrics.c
new file mode 100644
index 000000000..defb8d047
--- /dev/null
+++ b/exporting/send_internal_metrics.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "exporting_engine.h"
+
+/**
+ * Create a chart for the main exporting thread CPU usage
+ *
+ * @param st_rusage the thread CPU usage chart
+ * @param rd_user a dimension for user CPU usage
+ * @param rd_system a dimension for system CPU usage
+ */
+void create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
+{
+ if (*st_rusage && *rd_user && *rd_system)
+ return;
+
+ *st_rusage = rrdset_create_localhost(
+ "netdata", "exporting_main_thread_cpu", NULL, "exporting", "exporting_cpu_usage", "Netdata Main Exporting Thread CPU Usage",
+ "milliseconds/s", "exporting", NULL, 130600, localhost->rrd_update_every, RRDSET_TYPE_STACKED);
+
+ *rd_user = rrddim_add(*st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ *rd_system = rrddim_add(*st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+}
+
+/**
+ * Send the main exporting thread CPU usage
+ *
+ * @param st_rusage a thread CPU usage chart
+ * @param rd_user a dimension for user CPU usage
+ * @param rd_system a dimension for system CPU usage
+ */
+void send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
+{
+ struct rusage thread;
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if (likely(st_rusage->counter_done))
+ rrdset_next(st_rusage);
+
+ rrddim_set_by_pointer(st_rusage, rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(st_rusage, rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+
+ rrdset_done(st_rusage);
+}
+
+/**
+ * Send internal metrics for an instance
+ *
+ * Send performance metrics for the operation of the exporting engine itself to the Netdata database.
+ *
+ * @param instance an instance data structure.
+ */
+void send_internal_metrics(struct instance *instance)
+{
+ struct stats *stats = &instance->stats;
+
+ // ------------------------------------------------------------------------
+ // create charts for monitoring the exporting operations
+
+ if (!stats->initialized) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ BUFFER *family = buffer_create(0);
+
+ buffer_sprintf(family, "exporting_%s", instance->config.name);
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_metrics", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_metrics = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_buffer", "Netdata Buffered Metrics", "metrics", "exporting", NULL,
+ 130610, instance->config.update_every, RRDSET_TYPE_LINE);
+
+ stats->rd_buffered_metrics = rrddim_add(stats->st_metrics, "buffered", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_lost_metrics = rrddim_add(stats->st_metrics, "lost", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_sent_metrics = rrddim_add(stats->st_metrics, "sent", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ // ------------------------------------------------------------------------
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_bytes", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_bytes = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_data_size", "Netdata Exporting Data Size", "KiB", "exporting", NULL,
+ 130620, instance->config.update_every, RRDSET_TYPE_AREA);
+
+ stats->rd_buffered_bytes = rrddim_add(stats->st_bytes, "buffered", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_lost_bytes = rrddim_add(stats->st_bytes, "lost", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_sent_bytes = rrddim_add(stats->st_bytes, "sent", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_received_bytes = rrddim_add(stats->st_bytes, "received", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ // ------------------------------------------------------------------------
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_ops", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_ops = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_operations", "Netdata Exporting Operations", "operations", "exporting",
+ NULL, 130630, instance->config.update_every, RRDSET_TYPE_LINE);
+
+ stats->rd_transmission_successes = rrddim_add(stats->st_ops, "write", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_data_lost_events = rrddim_add(stats->st_ops, "discard", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_reconnects = rrddim_add(stats->st_ops, "reconnect", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_transmission_failures = rrddim_add(stats->st_ops, "failure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ stats->rd_receptions = rrddim_add(stats->st_ops, "read", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ // ------------------------------------------------------------------------
+
+ snprintf(id, RRD_ID_LENGTH_MAX, "exporting_%s_thread_cpu", instance->config.name);
+ netdata_fix_chart_id(id);
+
+ stats->st_rusage = rrdset_create_localhost(
+ "netdata", id, NULL, buffer_tostring(family), "exporting_instance", "Netdata Exporting Instance Thread CPU Usage",
+ "milliseconds/s", "exporting", NULL, 130640, instance->config.update_every, RRDSET_TYPE_STACKED);
+
+ stats->rd_user = rrddim_add(stats->st_rusage, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ stats->rd_system = rrddim_add(stats->st_rusage, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+
+ buffer_free(family);
+
+ stats->initialized = 1;
+ }
+
+ // ------------------------------------------------------------------------
+ // update the monitoring charts
+
+ if (likely(stats->st_metrics->counter_done))
+ rrdset_next(stats->st_metrics);
+
+ rrddim_set_by_pointer(stats->st_metrics, stats->rd_buffered_metrics, stats->buffered_metrics);
+ rrddim_set_by_pointer(stats->st_metrics, stats->rd_lost_metrics, stats->lost_metrics);
+ rrddim_set_by_pointer(stats->st_metrics, stats->rd_sent_metrics, stats->sent_metrics);
+
+ rrdset_done(stats->st_metrics);
+
+ // ------------------------------------------------------------------------
+
+ if (likely(stats->st_bytes->counter_done))
+ rrdset_next(stats->st_bytes);
+
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_buffered_bytes, stats->buffered_bytes);
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_lost_bytes, stats->lost_bytes);
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_sent_bytes, stats->sent_bytes);
+ rrddim_set_by_pointer(stats->st_bytes, stats->rd_received_bytes, stats->received_bytes);
+
+ rrdset_done(stats->st_bytes);
+
+ // ------------------------------------------------------------------------
+
+ if (likely(stats->st_ops->counter_done))
+ rrdset_next(stats->st_ops);
+
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_successes, stats->transmission_successes);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_data_lost_events, stats->data_lost_events);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_reconnects, stats->reconnects);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_transmission_failures, stats->transmission_failures);
+ rrddim_set_by_pointer(stats->st_ops, stats->rd_receptions, stats->receptions);
+
+ rrdset_done(stats->st_ops);
+
+ // ------------------------------------------------------------------------
+
+ struct rusage thread;
+ getrusage(RUSAGE_THREAD, &thread);
+
+ if (likely(stats->st_rusage->counter_done))
+ rrdset_next(stats->st_rusage);
+
+ rrddim_set_by_pointer(stats->st_rusage, stats->rd_user, thread.ru_utime.tv_sec * 1000000ULL + thread.ru_utime.tv_usec);
+ rrddim_set_by_pointer(stats->st_rusage, stats->rd_system, thread.ru_stime.tv_sec * 1000000ULL + thread.ru_stime.tv_usec);
+
+ rrdset_done(stats->st_rusage);
+}
diff --git a/exporting/tests/Makefile.am b/exporting/tests/Makefile.am
new file mode 100644
index 000000000..babdcf0df
--- /dev/null
+++ b/exporting/tests/Makefile.am
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
diff --git a/exporting/tests/exporting_doubles.c b/exporting/tests/exporting_doubles.c
new file mode 100644
index 000000000..3c73e0327
--- /dev/null
+++ b/exporting/tests/exporting_doubles.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
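+// Test doubles for the exporting engine's own functions. The __wrap_* functions stand in
+// for the real implementations (typically substituted at link time via the linker's --wrap
+// option), while the matching __real_* declarations let tests call the original code.
+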
+struct engine *__real_read_exporting_config();
+struct engine *__wrap_read_exporting_config()
+{
+ function_called();
+ return mock_ptr_type(struct engine *);
+}
+
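+// Not a linker wrap: builds a hand-crafted engine with a single graphite instance so the
+// fixtures can skip parsing exporting.conf (see setup_configured_engine()).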
+struct engine *__mock_read_exporting_config()
+{
+ struct engine *engine = calloc(1, sizeof(struct engine));
+ engine->config.hostname = strdupz("test_engine_host");
+ engine->config.update_every = 3;
+
+ engine->instance_root = calloc(1, sizeof(struct instance));
+ struct instance *instance = engine->instance_root;
+ instance->engine = engine;
+ instance->config.type = EXPORTING_CONNECTOR_TYPE_GRAPHITE;
+ instance->config.name = strdupz("instance_name");
+ instance->config.destination = strdupz("localhost");
+ instance->config.prefix = strdupz("netdata");
+ instance->config.hostname = strdupz("test-host");
+ instance->config.update_every = 1;
+ instance->config.buffer_on_failures = 10;
+ instance->config.timeoutms = 10000;
+ instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+ instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ return engine;
+}
+
+int __real_init_connectors(struct engine *engine);
+int __wrap_init_connectors(struct engine *engine)
+{
+ function_called();
+ check_expected_ptr(engine);
+ return mock_type(int);
+}
+
+int __real_mark_scheduled_instances(struct engine *engine);
+int __wrap_mark_scheduled_instances(struct engine *engine)
+{
+ function_called();
+ check_expected_ptr(engine);
+ return mock_type(int);
+}
+
+calculated_number __real_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+calculated_number __wrap_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp)
+{
+ (void)instance;
+ (void)rd;
+
+ *last_timestamp = 15052;
+
+ function_called();
+ return mock_type(calculated_number);
+}
+
+int __real_prepare_buffers(struct engine *engine);
+int __wrap_prepare_buffers(struct engine *engine)
+{
+ function_called();
+ check_expected_ptr(engine);
+ return mock_type(int);
+}
+
+void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system)
+{
+ function_called();
+ check_expected_ptr(st_rusage);
+ check_expected_ptr(rd_user);
+ check_expected_ptr(rd_system);
+}
+
+void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system)
+{
+ function_called();
+ check_expected_ptr(st_rusage);
+ check_expected_ptr(rd_user);
+ check_expected_ptr(rd_system);
+}
+
+int __wrap_send_internal_metrics(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(host);
+ return mock_type(int);
+}
+
+int __wrap_rrdset_is_exportable(struct instance *instance, RRDSET *st)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(st);
+ return mock_type(int);
+}
+
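+// The __mock_*_formatting callbacks below are plugged into an instance's formatting hooks
+// by test_prepare_buffers() to verify the order in which prepare_buffers() invokes them.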
+int __mock_start_batch_formatting(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+int __mock_start_host_formatting(struct instance *instance, RRDHOST *host)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(host);
+ return mock_type(int);
+}
+
+int __mock_start_chart_formatting(struct instance *instance, RRDSET *st)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(st);
+ return mock_type(int);
+}
+
+int __mock_metric_formatting(struct instance *instance, RRDDIM *rd)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(rd);
+ return mock_type(int);
+}
+
+int __mock_end_chart_formatting(struct instance *instance, RRDSET *st)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(st);
+ return mock_type(int);
+}
+
+int __mock_end_host_formatting(struct instance *instance, RRDHOST *host)
+{
+ function_called();
+ check_expected_ptr(instance);
+ check_expected_ptr(host);
+ return mock_type(int);
+}
+
+int __mock_end_batch_formatting(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+int __wrap_simple_connector_end_batch(struct instance *instance)
+{
+ function_called();
+ check_expected_ptr(instance);
+ return mock_type(int);
+}
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+void *__wrap_init_write_request()
+{
+ function_called();
+ return mock_ptr_type(void *);
+}
+
+void __wrap_add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp)
+{
+ function_called();
+ check_expected_ptr(write_request_p);
+ check_expected_ptr(name);
+ check_expected_ptr(instance);
+ check_expected_ptr(application);
+ check_expected_ptr(version);
+ check_expected(timestamp);
+}
+
+void __wrap_add_label(void *write_request_p, char *key, char *value)
+{
+ function_called();
+ check_expected_ptr(write_request_p);
+ check_expected_ptr(key);
+ check_expected_ptr(value);
+}
+
+void __wrap_add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp)
+{
+ function_called();
+ check_expected_ptr(write_request_p);
+ check_expected_ptr(name);
+ check_expected_ptr(chart);
+ check_expected_ptr(family);
+ check_expected_ptr(dimension);
+ check_expected_ptr(instance);
+ check_expected(value);
+ check_expected(timestamp);
+}
+#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
+
+#if HAVE_KINESIS
+void __wrap_aws_sdk_init()
+{
+ function_called();
+}
+
+void __wrap_kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout)
+{
+ function_called();
+ check_expected_ptr(kinesis_specific_data_p);
+ check_expected_ptr(region);
+ check_expected_ptr(access_key_id);
+ check_expected_ptr(secret_key);
+ check_expected(timeout);
+}
+
+void __wrap_kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len)
+{
+ function_called();
+ check_expected_ptr(kinesis_specific_data_p);
+ check_expected_ptr(stream_name);
+ check_expected_ptr(partition_key);
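+    // 'data' is checked twice so a test can queue both an expect_value() and an
+    // expect_string() expectation for the same argument.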
+ check_expected_ptr(data);
+ check_expected_ptr(data);
+ check_expected(data_len);
+}
+
+int __wrap_kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes)
+{
+ function_called();
+ check_expected_ptr(request_outcomes_p);
+ check_expected_ptr(error_message);
+ check_expected_ptr(sent_bytes);
+ check_expected_ptr(lost_bytes);
+ return mock_type(int);
+}
+#endif // HAVE_KINESIS
+
+#if ENABLE_EXPORTING_PUBSUB
+int __wrap_pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(error_message);
+ check_expected_ptr(destination);
+ check_expected_ptr(credentials_file);
+ check_expected_ptr(project_id);
+ check_expected_ptr(topic_id);
+ return mock_type(int);
+}
+
+int __wrap_pubsub_add_message(void *pubsub_specific_data_p, char *data)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(data);
+ return mock_type(int);
+}
+
+int __wrap_pubsub_publish(
+ void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(error_message);
+ check_expected(buffered_metrics);
+ check_expected(buffered_bytes);
+ return mock_type(int);
+}
+
+int __wrap_pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes)
+{
+ function_called();
+ check_expected_ptr(pubsub_specific_data_p);
+ check_expected_ptr(error_message);
+ check_expected_ptr(sent_metrics);
+ check_expected_ptr(sent_bytes);
+ check_expected_ptr(lost_metrics);
+ check_expected_ptr(lost_bytes);
+ return mock_type(int);
+}
+#endif // ENABLE_EXPORTING_PUBSUB
+
+#if HAVE_MONGOC
+void __wrap_mongoc_init()
+{
+ function_called();
+}
+
+mongoc_uri_t * __wrap_mongoc_uri_new_with_error (const char *uri_string, bson_error_t *error)
+{
+ function_called();
+ check_expected_ptr(uri_string);
+ check_expected_ptr(error);
+ return mock_ptr_type(mongoc_uri_t *);
+}
+
+int32_t __wrap_mongoc_uri_get_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t fallback)
+{
+ function_called();
+ check_expected_ptr(uri);
+ check_expected_ptr(option);
+ check_expected(fallback);
+ return mock_type(int32_t);
+}
+
+bool __wrap_mongoc_uri_set_option_as_int32 (const mongoc_uri_t *uri, const char *option, int32_t value)
+{
+ function_called();
+ check_expected_ptr(uri);
+ check_expected_ptr(option);
+ check_expected(value);
+ return mock_type(bool);
+}
+
+mongoc_client_t * __wrap_mongoc_client_new_from_uri (const mongoc_uri_t *uri)
+{
+ function_called();
+ check_expected_ptr(uri);
+ return mock_ptr_type(mongoc_client_t *);
+}
+
+bool __wrap_mongoc_client_set_appname (mongoc_client_t *client, const char *appname)
+{
+ function_called();
+ check_expected_ptr(client);
+ check_expected_ptr(appname);
+ return mock_type(bool);
+}
+
+mongoc_collection_t *
+__wrap_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection)
+{
+ function_called();
+ check_expected_ptr(client);
+ check_expected_ptr(db);
+ check_expected_ptr(collection);
+ return mock_ptr_type(mongoc_collection_t *);
+}
+
+void __wrap_mongoc_uri_destroy (mongoc_uri_t *uri)
+{
+ function_called();
+ check_expected_ptr(uri);
+}
+
+bool __wrap_mongoc_collection_insert_many(
+ mongoc_collection_t *collection,
+ const bson_t **documents,
+ size_t n_documents,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error)
+{
+ function_called();
+ check_expected_ptr(collection);
+ check_expected_ptr(documents);
+ check_expected(n_documents);
+ check_expected_ptr(opts);
+ check_expected_ptr(reply);
+ check_expected_ptr(error);
+ return mock_type(bool);
+}
+#endif // HAVE_MONGOC
diff --git a/exporting/tests/exporting_fixtures.c b/exporting/tests/exporting_fixtures.c
new file mode 100644
index 000000000..00bb0ed0f
--- /dev/null
+++ b/exporting/tests/exporting_fixtures.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
+int setup_configured_engine(void **state)
+{
+ struct engine *engine = __mock_read_exporting_config();
+ engine->instance_root->data_is_ready = 1;
+
+ *state = engine;
+
+ return 0;
+}
+
+int teardown_configured_engine(void **state)
+{
+ struct engine *engine = *state;
+
+ struct instance *instance = engine->instance_root;
+ free((void *)instance->config.destination);
+ free((void *)instance->config.name);
+ free((void *)instance->config.prefix);
+ free((void *)instance->config.hostname);
+ simple_pattern_free(instance->config.charts_pattern);
+ simple_pattern_free(instance->config.hosts_pattern);
+ free(instance);
+
+ free((void *)engine->config.hostname);
+ free(engine);
+
+ return 0;
+}
+
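+// Build a minimal in-memory localhost: two host labels, one chart ("chart_id") with a single
+// dimension ("dimension_id"), and mocked storage query callbacks wired into the dimension.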
+int setup_rrdhost()
+{
+ localhost = calloc(1, sizeof(RRDHOST));
+
+ localhost->rrd_update_every = 1;
+
+ localhost->tags = strdupz("TAG1=VALUE1 TAG2=VALUE2");
+
+ struct label *label = calloc(1, sizeof(struct label));
+ label->key = strdupz("key1");
+ label->value = strdupz("value1");
+ label->label_source = LABEL_SOURCE_NETDATA_CONF;
+ localhost->labels.head = label;
+
+ label = calloc(1, sizeof(struct label));
+ label->key = strdupz("key2");
+ label->value = strdupz("value2");
+ label->label_source = LABEL_SOURCE_AUTO;
+ localhost->labels.head->next = label;
+
+ localhost->rrdset_root = calloc(1, sizeof(RRDSET));
+ RRDSET *st = localhost->rrdset_root;
+ st->rrdhost = localhost;
+ strcpy(st->id, "chart_id");
+ st->name = strdupz("chart_name");
+ st->flags |= RRDSET_FLAG_ENABLED;
+ st->rrd_memory_mode |= RRD_MEMORY_MODE_SAVE;
+ st->update_every = 1;
+
+ localhost->rrdset_root->dimensions = calloc(1, sizeof(RRDDIM));
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ rd->rrdset = st;
+ rd->id = strdupz("dimension_id");
+ rd->name = strdupz("dimension_name");
+ rd->last_collected_value = 123000321;
+ rd->last_collected_time.tv_sec = 15051;
+ rd->collections_counter++;
+ rd->next = NULL;
+
+ rd->state = calloc(1, sizeof(*rd->state));
+ rd->state->query_ops.oldest_time = __mock_rrddim_query_oldest_time;
+ rd->state->query_ops.latest_time = __mock_rrddim_query_latest_time;
+ rd->state->query_ops.init = __mock_rrddim_query_init;
+ rd->state->query_ops.is_finished = __mock_rrddim_query_is_finished;
+ rd->state->query_ops.next_metric = __mock_rrddim_query_next_metric;
+ rd->state->query_ops.finalize = __mock_rrddim_query_finalize;
+
+ return 0;
+}
+
+int teardown_rrdhost()
+{
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ free((void *)rd->name);
+ free((void *)rd->id);
+ free(rd->state);
+ free(rd);
+
+ RRDSET *st = localhost->rrdset_root;
+ free((void *)st->name);
+ free(st);
+
+ free(localhost->labels.head->next->key);
+ free(localhost->labels.head->next->value);
+ free(localhost->labels.head->next);
+ free(localhost->labels.head->key);
+ free(localhost->labels.head->value);
+ free(localhost->labels.head);
+
+ free((void *)localhost->tags);
+ free(localhost);
+
+ return 0;
+}
+
+int setup_initialized_engine(void **state)
+{
+ setup_configured_engine(state);
+
+ struct engine *engine = *state;
+ init_connectors_in_tests(engine);
+
+ setup_rrdhost();
+
+ return 0;
+}
+
+int teardown_initialized_engine(void **state)
+{
+ struct engine *engine = *state;
+
+ teardown_rrdhost();
+ buffer_free(engine->instance_root->labels);
+ buffer_free(engine->instance_root->buffer);
+ teardown_configured_engine(state);
+
+ return 0;
+}
+
+int setup_prometheus(void **state)
+{
+ (void)state;
+
+ prometheus_exporter_instance = calloc(1, sizeof(struct instance));
+
+ setup_rrdhost();
+
+ prometheus_exporter_instance->config.update_every = 10;
+
+ prometheus_exporter_instance->config.options |=
+ EXPORTING_OPTION_SEND_NAMES | EXPORTING_OPTION_SEND_CONFIGURED_LABELS | EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ prometheus_exporter_instance->config.charts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+ prometheus_exporter_instance->config.hosts_pattern = simple_pattern_create("*", NULL, SIMPLE_PATTERN_EXACT);
+
+ return 0;
+}
+
+int teardown_prometheus(void **state)
+{
+ (void)state;
+
+ teardown_rrdhost();
+
+ simple_pattern_free(prometheus_exporter_instance->config.charts_pattern);
+ simple_pattern_free(prometheus_exporter_instance->config.hosts_pattern);
+ free(prometheus_exporter_instance);
+
+ return 0;
+}
diff --git a/exporting/tests/netdata_doubles.c b/exporting/tests/netdata_doubles.c
new file mode 100644
index 000000000..f4da7769f
--- /dev/null
+++ b/exporting/tests/netdata_doubles.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
+// Use memory allocation functions guarded by CMocka in strdupz
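+// (with UNIT_TESTING defined, cmocka redirects malloc/free to instrumented versions that can
+// detect leaks and buffer overruns in the code under test)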
+const char *__wrap_strdupz(const char *s)
+{
+ char *duplicate = malloc(sizeof(char) * (strlen(s) + 1));
+ strcpy(duplicate, s);
+
+ return duplicate;
+}
+
+time_t __wrap_now_realtime_sec(void)
+{
+ function_called();
+ return mock_type(time_t);
+}
+
+void __wrap_uv_thread_set_name_np(uv_thread_t ut, const char* name)
+{
+ (void)ut;
+ (void)name;
+
+ function_called();
+}
+
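+// Capture the formatted message into the global log_line buffer so tests can assert on
+// the text that was logged.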
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+
+ function_called();
+
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(log_line, MAX_LOG_LINE, fmt, args);
+ va_end(args);
+}
+
+int __wrap_connect_to_one_of(
+ const char *destination,
+ int default_port,
+ struct timeval *timeout,
+ size_t *reconnects_counter,
+ char *connected_to,
+ size_t connected_to_size)
+{
+ (void)timeout;
+
+ function_called();
+
+ check_expected(destination);
+ check_expected_ptr(default_port);
+ // TODO: check_expected_ptr(timeout);
+ check_expected(reconnects_counter);
+ check_expected(connected_to);
+ check_expected(connected_to_size);
+
+ return mock_type(int);
+}
+
+void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line)
+{
+ (void)host;
+ (void)file;
+ (void)function;
+ (void)line;
+}
+
+void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line)
+{
+ (void)st;
+ (void)file;
+ (void)function;
+ (void)line;
+}
+
+void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line)
+{
+ (void)file;
+ (void)function;
+ (void)line;
+}
+
+RRDSET *rrdset_create_custom(
+ RRDHOST *host,
+ const char *type,
+ const char *id,
+ const char *name,
+ const char *family,
+ const char *context,
+ const char *title,
+ const char *units,
+ const char *plugin,
+ const char *module,
+ long priority,
+ int update_every,
+ RRDSET_TYPE chart_type,
+ RRD_MEMORY_MODE memory_mode,
+ long history_entries)
+{
+ check_expected_ptr(host);
+ check_expected_ptr(type);
+ check_expected_ptr(id);
+ check_expected_ptr(name);
+ check_expected_ptr(family);
+ check_expected_ptr(context);
+ UNUSED(title);
+ check_expected_ptr(units);
+ check_expected_ptr(plugin);
+ check_expected_ptr(module);
+ check_expected(priority);
+ check_expected(update_every);
+ check_expected(chart_type);
+ UNUSED(memory_mode);
+ UNUSED(history_entries);
+
+ function_called();
+
+ return mock_ptr_type(RRDSET *);
+}
+
+void rrdset_next_usec(RRDSET *st, usec_t microseconds)
+{
+ check_expected_ptr(st);
+ UNUSED(microseconds);
+
+ function_called();
+}
+
+void rrdset_done(RRDSET *st)
+{
+ check_expected_ptr(st);
+
+ function_called();
+}
+
+RRDDIM *rrddim_add_custom(
+ RRDSET *st,
+ const char *id,
+ const char *name,
+ collected_number multiplier,
+ collected_number divisor,
+ RRD_ALGORITHM algorithm,
+ RRD_MEMORY_MODE memory_mode)
+{
+ check_expected_ptr(st);
+ UNUSED(id);
+ check_expected_ptr(name);
+ check_expected(multiplier);
+ check_expected(divisor);
+ check_expected(algorithm);
+ UNUSED(memory_mode);
+
+ function_called();
+
+ return NULL;
+}
+
+collected_number rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, collected_number value)
+{
+ check_expected_ptr(st);
+ UNUSED(rd);
+ UNUSED(value);
+
+ function_called();
+
+ return 0;
+}
+
+const char *rrd_memory_mode_name(RRD_MEMORY_MODE id)
+{
+ (void)id;
+ return RRD_MEMORY_MODE_NONE_NAME;
+}
+
+calculated_number rrdvar2number(RRDVAR *rv)
+{
+ (void)rv;
+ return 0;
+}
+
+int foreach_host_variable_callback(RRDHOST *host, int (*callback)(RRDVAR *rv, void *data), void *data)
+{
+ (void)host;
+ (void)callback;
+ (void)data;
+ return 0;
+}
+
+void rrdset_update_heterogeneous_flag(RRDSET *st)
+{
+ (void)st;
+}
+
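+// Doubles for the per-dimension storage query API (rd->state->query_ops). They let
+// exporting_calculate_value_from_stored_data() run without a real database; the fixtures
+// wire them up in setup_rrdhost().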
+time_t __mock_rrddim_query_oldest_time(RRDDIM *rd)
+{
+ (void)rd;
+
+ function_called();
+ return mock_type(time_t);
+}
+
+time_t __mock_rrddim_query_latest_time(RRDDIM *rd)
+{
+ (void)rd;
+
+ function_called();
+ return mock_type(time_t);
+}
+
+void __mock_rrddim_query_init(RRDDIM *rd, struct rrddim_query_handle *handle, time_t start_time, time_t end_time)
+{
+ (void)rd;
+ (void)handle;
+
+ function_called();
+ check_expected(start_time);
+ check_expected(end_time);
+}
+
+int __mock_rrddim_query_is_finished(struct rrddim_query_handle *handle)
+{
+ (void)handle;
+
+ function_called();
+ return mock_type(int);
+}
+
+storage_number __mock_rrddim_query_next_metric(struct rrddim_query_handle *handle, time_t *current_time)
+{
+ (void)handle;
+ (void)current_time;
+
+ function_called();
+ return mock_type(storage_number);
+}
+
+void __mock_rrddim_query_finalize(struct rrddim_query_handle *handle)
+{
+ (void)handle;
+
+ function_called();
+}
diff --git a/exporting/tests/system_doubles.c b/exporting/tests/system_doubles.c
new file mode 100644
index 000000000..ca85800c0
--- /dev/null
+++ b/exporting/tests/system_doubles.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+
+void __wrap_uv_thread_create(uv_thread_t thread, void (*worker)(void *arg), void *arg)
+{
+ function_called();
+
+ check_expected_ptr(thread);
+ check_expected_ptr(worker);
+ check_expected_ptr(arg);
+}
+
+void __wrap_uv_mutex_lock(uv_mutex_t *mutex)
+{
+ (void)mutex;
+}
+
+void __wrap_uv_mutex_unlock(uv_mutex_t *mutex)
+{
+ (void)mutex;
+}
+
+void __wrap_uv_cond_signal(uv_cond_t *cond_var)
+{
+ (void)cond_var;
+}
+
+void __wrap_uv_cond_wait(uv_cond_t *cond_var, uv_mutex_t *mutex)
+{
+ (void)cond_var;
+ (void)mutex;
+}
+
+ssize_t __wrap_recv(int sockfd, void *buf, size_t len, int flags)
+{
+ function_called();
+
+ check_expected(sockfd);
+ check_expected_ptr(buf);
+ check_expected(len);
+ check_expected(flags);
+
+ char *mock_string = "Test recv";
+ strcpy(buf, mock_string);
+
+ return strlen(mock_string);
+}
+
+ssize_t __wrap_send(int sockfd, const void *buf, size_t len, int flags)
+{
+ function_called();
+
+ check_expected(sockfd);
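+    // buf is checked twice so tests can pair an expect_value() with an expect_string()
+    // on the same argument (see test_simple_connector_send_buffer()).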
+ check_expected_ptr(buf);
+ check_expected_ptr(buf);
+ check_expected(len);
+ check_expected(flags);
+
+ return strlen(buf);
+}
diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c
new file mode 100644
index 000000000..774d1a265
--- /dev/null
+++ b/exporting/tests/test_exporting_engine.c
@@ -0,0 +1,1939 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "test_exporting_engine.h"
+#include "libnetdata/required_dummies.h"
+
+RRDHOST *localhost;
+netdata_rwlock_t rrd_rwlock;
+
+// global variables needed by read_exporting_config()
+struct config netdata_config;
+char *netdata_configured_user_config_dir = ".";
+char *netdata_configured_stock_config_dir = ".";
+char *netdata_configured_hostname = "test_global_host";
+
+char log_line[MAX_LOG_LINE + 1];
+
+BACKEND_OPTIONS global_backend_options = 0;
+const char *global_backend_source = "average";
+const char *global_backend_prefix = "netdata";
+
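+// Run the real init_connectors() against a configured engine, setting expectations for the
+// worker thread it spawns; shared by the initialized-engine fixture and test_init_connectors().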
+void init_connectors_in_tests(struct engine *engine)
+{
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_uv_thread_create);
+
+ expect_value(__wrap_uv_thread_create, thread, &engine->instance_root->thread);
+ expect_value(__wrap_uv_thread_create, worker, simple_connector_worker);
+ expect_value(__wrap_uv_thread_create, arg, engine->instance_root);
+
+ expect_function_call(__wrap_uv_thread_set_name_np);
+
+ assert_int_equal(__real_init_connectors(engine), 0);
+
+ assert_int_equal(engine->now, 2);
+ assert_int_equal(engine->instance_root->after, 2);
+}
+
+static void test_exporting_engine(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_read_exporting_config);
+ will_return(__wrap_read_exporting_config, engine);
+
+ expect_function_call(__wrap_init_connectors);
+ expect_memory(__wrap_init_connectors, engine, engine, sizeof(struct engine));
+ will_return(__wrap_init_connectors, 0);
+
+ expect_function_call(__wrap_create_main_rusage_chart);
+ expect_not_value(__wrap_create_main_rusage_chart, st_rusage, NULL);
+ expect_not_value(__wrap_create_main_rusage_chart, rd_user, NULL);
+ expect_not_value(__wrap_create_main_rusage_chart, rd_system, NULL);
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_mark_scheduled_instances);
+ expect_memory(__wrap_mark_scheduled_instances, engine, engine, sizeof(struct engine));
+ will_return(__wrap_mark_scheduled_instances, 1);
+
+ expect_function_call(__wrap_prepare_buffers);
+ expect_memory(__wrap_prepare_buffers, engine, engine, sizeof(struct engine));
+ will_return(__wrap_prepare_buffers, 0);
+
+ expect_function_call(__wrap_send_main_rusage);
+ expect_value(__wrap_send_main_rusage, st_rusage, NULL);
+ expect_value(__wrap_send_main_rusage, rd_user, NULL);
+ expect_value(__wrap_send_main_rusage, rd_system, NULL);
+
+ void *ptr = malloc(sizeof(struct netdata_static_thread));
+ assert_ptr_equal(exporting_main(ptr), NULL);
+ assert_int_equal(engine->now, 2);
+ free(ptr);
+}
+
+static void test_read_exporting_config(void **state)
+{
+ struct engine *engine = __mock_read_exporting_config(); // TODO: use real read_exporting_config() function
+ *state = engine;
+
+ assert_ptr_not_equal(engine, NULL);
+ assert_string_equal(engine->config.hostname, "test_engine_host");
+ assert_int_equal(engine->config.update_every, 3);
+ assert_int_equal(engine->instance_num, 0);
+
+ struct instance *instance = engine->instance_root;
+ assert_ptr_not_equal(instance, NULL);
+ assert_ptr_equal(instance->next, NULL);
+ assert_ptr_equal(instance->engine, engine);
+ assert_int_equal(instance->config.type, EXPORTING_CONNECTOR_TYPE_GRAPHITE);
+ assert_string_equal(instance->config.destination, "localhost");
+ assert_string_equal(instance->config.prefix, "netdata");
+ assert_int_equal(instance->config.update_every, 1);
+ assert_int_equal(instance->config.buffer_on_failures, 10);
+ assert_int_equal(instance->config.timeoutms, 10000);
+ assert_true(simple_pattern_matches(instance->config.charts_pattern, "any_chart"));
+    assert_true(simple_pattern_matches(instance->config.hosts_pattern, "any_host"));
+ assert_int_equal(instance->config.options, EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES);
+
+ teardown_configured_engine(state);
+}
+
+static void test_init_connectors(void **state)
+{
+ struct engine *engine = *state;
+
+ init_connectors_in_tests(engine);
+
+ assert_int_equal(engine->instance_num, 1);
+
+ struct instance *instance = engine->instance_root;
+
+ assert_ptr_equal(instance->next, NULL);
+ assert_int_equal(instance->index, 0);
+
+ struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config;
+ assert_int_equal(connector_specific_config->default_port, 2003);
+
+ assert_ptr_equal(instance->worker, simple_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_graphite_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+
+ BUFFER *buffer = instance->buffer;
+ assert_ptr_not_equal(buffer, NULL);
+ buffer_sprintf(buffer, "%s", "graphite test");
+ assert_string_equal(buffer_tostring(buffer), "graphite test");
+}
+
+static void test_init_graphite_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_graphite_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 2003);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_graphite_plaintext);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_graphite_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_graphite_plaintext);
+}
+
+static void test_init_json_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_json_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 5448);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_json_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
+}
+
+static void test_init_opentsdb_telnet_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_telnet_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 4242);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_opentsdb_telnet);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_telnet_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_opentsdb_telnet);
+}
+
+static void test_init_opentsdb_http_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_http_instance(instance), 0);
+ assert_int_equal(
+ ((struct simple_connector_config *)(instance->config.connector_specific_config))->default_port, 4242);
+ freez(instance->config.connector_specific_config);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_opentsdb_http);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+ assert_int_equal(init_opentsdb_http_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_opentsdb_http);
+}
+
+static void test_mark_scheduled_instances(void **state)
+{
+ struct engine *engine = *state;
+
+ assert_int_equal(__real_mark_scheduled_instances(engine), 1);
+
+ struct instance *instance = engine->instance_root;
+ assert_int_equal(instance->scheduled, 1);
+ assert_int_equal(instance->before, 2);
+}
+
+static void test_rrdhost_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ expect_function_call(__wrap_info_int);
+
+ assert_ptr_equal(localhost->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdhost_is_exportable(instance, localhost), 1);
+
+ assert_string_equal(log_line, "enabled exporting of host 'localhost' for instance 'instance_name'");
+
+ assert_ptr_not_equal(localhost->exporting_flags, NULL);
+ assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_BACKEND_SEND);
+}
+
+static void test_false_rrdhost_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ simple_pattern_free(instance->config.hosts_pattern);
+ instance->config.hosts_pattern = simple_pattern_create("!*", NULL, SIMPLE_PATTERN_EXACT);
+
+ expect_function_call(__wrap_info_int);
+
+ assert_ptr_equal(localhost->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdhost_is_exportable(instance, localhost), 0);
+
+ assert_string_equal(log_line, "disabled exporting of host 'localhost' for instance 'instance_name'");
+
+ assert_ptr_not_equal(localhost->exporting_flags, NULL);
+ assert_int_equal(localhost->exporting_flags[0], RRDHOST_FLAG_BACKEND_DONT_SEND);
+}
+
+static void test_rrdset_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ RRDSET *st = localhost->rrdset_root;
+
+ assert_ptr_equal(st->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdset_is_exportable(instance, st), 1);
+
+ assert_ptr_not_equal(st->exporting_flags, NULL);
+ assert_int_equal(st->exporting_flags[0], RRDSET_FLAG_BACKEND_SEND);
+}
+
+static void test_false_rrdset_is_exportable(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ RRDSET *st = localhost->rrdset_root;
+
+ simple_pattern_free(instance->config.charts_pattern);
+ instance->config.charts_pattern = simple_pattern_create("!*", NULL, SIMPLE_PATTERN_EXACT);
+
+ assert_ptr_equal(st->exporting_flags, NULL);
+
+ assert_int_equal(__real_rrdset_is_exportable(instance, st), 0);
+
+ assert_ptr_not_equal(st->exporting_flags, NULL);
+ assert_int_equal(st->exporting_flags[0], RRDSET_FLAG_BACKEND_IGNORE);
+}
+
+static void test_exporting_calculate_value_from_stored_data(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ time_t timestamp;
+
+ instance->after = 3;
+ instance->before = 10;
+
+ expect_function_call(__mock_rrddim_query_oldest_time);
+ will_return(__mock_rrddim_query_oldest_time, 1);
+
+ expect_function_call(__mock_rrddim_query_latest_time);
+ will_return(__mock_rrddim_query_latest_time, 2);
+
+ expect_function_call(__mock_rrddim_query_init);
+ expect_value(__mock_rrddim_query_init, start_time, 1);
+ expect_value(__mock_rrddim_query_init, end_time, 2);
+
+ expect_function_call(__mock_rrddim_query_is_finished);
+ will_return(__mock_rrddim_query_is_finished, 0);
+ expect_function_call(__mock_rrddim_query_next_metric);
+ will_return(__mock_rrddim_query_next_metric, pack_storage_number(27, SN_EXISTS));
+
+ expect_function_call(__mock_rrddim_query_is_finished);
+ will_return(__mock_rrddim_query_is_finished, 0);
+ expect_function_call(__mock_rrddim_query_next_metric);
+ will_return(__mock_rrddim_query_next_metric, pack_storage_number(45, SN_EXISTS));
+
+ expect_function_call(__mock_rrddim_query_is_finished);
+ will_return(__mock_rrddim_query_is_finished, 1);
+
+ expect_function_call(__mock_rrddim_query_finalize);
+
+ assert_int_equal(__real_exporting_calculate_value_from_stored_data(instance, rd, &timestamp), 36);
+}
+
+static void test_prepare_buffers(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->start_batch_formatting = __mock_start_batch_formatting;
+ instance->start_host_formatting = __mock_start_host_formatting;
+ instance->start_chart_formatting = __mock_start_chart_formatting;
+ instance->metric_formatting = __mock_metric_formatting;
+ instance->end_chart_formatting = __mock_end_chart_formatting;
+ instance->end_host_formatting = __mock_end_host_formatting;
+ instance->end_batch_formatting = __mock_end_batch_formatting;
+ __real_mark_scheduled_instances(engine);
+
+ expect_function_call(__mock_start_batch_formatting);
+ expect_value(__mock_start_batch_formatting, instance, instance);
+ will_return(__mock_start_batch_formatting, 0);
+
+ expect_function_call(__wrap_rrdhost_is_exportable);
+ expect_value(__wrap_rrdhost_is_exportable, instance, instance);
+ expect_value(__wrap_rrdhost_is_exportable, host, localhost);
+ will_return(__wrap_rrdhost_is_exportable, 1);
+
+ expect_function_call(__mock_start_host_formatting);
+ expect_value(__mock_start_host_formatting, instance, instance);
+ expect_value(__mock_start_host_formatting, host, localhost);
+ will_return(__mock_start_host_formatting, 0);
+
+ RRDSET *st = localhost->rrdset_root;
+ expect_function_call(__wrap_rrdset_is_exportable);
+ expect_value(__wrap_rrdset_is_exportable, instance, instance);
+ expect_value(__wrap_rrdset_is_exportable, st, st);
+ will_return(__wrap_rrdset_is_exportable, 1);
+
+ expect_function_call(__mock_start_chart_formatting);
+ expect_value(__mock_start_chart_formatting, instance, instance);
+ expect_value(__mock_start_chart_formatting, st, st);
+ will_return(__mock_start_chart_formatting, 0);
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ expect_function_call(__mock_metric_formatting);
+ expect_value(__mock_metric_formatting, instance, instance);
+ expect_value(__mock_metric_formatting, rd, rd);
+ will_return(__mock_metric_formatting, 0);
+
+ expect_function_call(__mock_end_chart_formatting);
+ expect_value(__mock_end_chart_formatting, instance, instance);
+ expect_value(__mock_end_chart_formatting, st, st);
+ will_return(__mock_end_chart_formatting, 0);
+
+ expect_function_call(__mock_end_host_formatting);
+ expect_value(__mock_end_host_formatting, instance, instance);
+ expect_value(__mock_end_host_formatting, host, localhost);
+ will_return(__mock_end_host_formatting, 0);
+
+ expect_function_call(__mock_end_batch_formatting);
+ expect_value(__mock_end_batch_formatting, instance, instance);
+ will_return(__mock_end_batch_formatting, 0);
+
+ assert_int_equal(__real_prepare_buffers(engine), 0);
+
+ assert_int_equal(instance->stats.buffered_metrics, 1);
+
+ // check with NULL functions
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = NULL;
+ instance->start_chart_formatting = NULL;
+ instance->metric_formatting = NULL;
+ instance->end_chart_formatting = NULL;
+ instance->end_host_formatting = NULL;
+ instance->end_batch_formatting = NULL;
+ assert_int_equal(__real_prepare_buffers(engine), 0);
+
+ assert_int_equal(instance->scheduled, 0);
+ assert_int_equal(instance->after, 2);
+}
+
+static void test_exporting_name_copy(void **state)
+{
+ (void)state;
+
+ char *source_name = "test.name-with/special#characters_";
+ char destination_name[RRD_ID_LENGTH_MAX + 1];
+
+ assert_int_equal(exporting_name_copy(destination_name, source_name, RRD_ID_LENGTH_MAX), 34);
+ assert_string_equal(destination_name, "test.name_with_special_characters_");
+}
+
+static void test_format_dimension_collected_graphite_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_graphite_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
+}
+
+static void test_format_dimension_stored_graphite_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_graphite_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 690565856.0000000 15052\n");
+}
+
+static void test_format_dimension_collected_json_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_json_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"prefix\":\"netdata\",\"hostname\":\"test-host\",\"host_tags\":\"TAG1=VALUE1 TAG2=VALUE2\","
+ "\"chart_id\":\"chart_id\",\"chart_name\":\"chart_name\",\"chart_family\":\"(null)\","
+ "\"chart_context\":\"(null)\",\"chart_type\":\"(null)\",\"units\":\"(null)\",\"id\":\"dimension_id\","
+ "\"name\":\"dimension_name\",\"value\":123000321,\"timestamp\":15051}\n");
+}
+
+static void test_format_dimension_stored_json_plaintext(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_json_plaintext(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"prefix\":\"netdata\",\"hostname\":\"test-host\",\"host_tags\":\"TAG1=VALUE1 TAG2=VALUE2\","
+ "\"chart_id\":\"chart_id\",\"chart_name\":\"chart_name\",\"chart_family\":\"(null)\"," \
+ "\"chart_context\": \"(null)\",\"chart_type\":\"(null)\",\"units\": \"(null)\",\"id\":\"dimension_id\","
+ "\"name\":\"dimension_name\",\"value\":690565856.0000000,\"timestamp\": 15052}\n");
+}
+
+static void test_format_dimension_collected_opentsdb_telnet(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_opentsdb_telnet(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "put netdata.chart_name.dimension_name 15051 123000321 host=test-host TAG1=VALUE1 TAG2=VALUE2\n");
+}
+
+static void test_format_dimension_stored_opentsdb_telnet(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_opentsdb_telnet(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "put netdata.chart_name.dimension_name 15052 690565856.0000000 host=test-host TAG1=VALUE1 TAG2=VALUE2\n");
+}
+
+static void test_format_dimension_collected_opentsdb_http(void **state)
+{
+ struct engine *engine = *state;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_collected_opentsdb_http(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"metric\":\"netdata.chart_name.dimension_name\","
+ "\"timestamp\":15051,"
+ "\"value\":123000321,"
+ "\"tags\":{\"host\":\"test-host TAG1=VALUE1 TAG2=VALUE2\"}}");
+}
+
+static void test_format_dimension_stored_opentsdb_http(void **state)
+{
+ struct engine *engine = *state;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+ assert_int_equal(format_dimension_stored_opentsdb_http(engine->instance_root, rd), 0);
+ assert_string_equal(
+ buffer_tostring(engine->instance_root->buffer),
+ "{\"metric\":\"netdata.chart_name.dimension_name\","
+ "\"timestamp\":15052,"
+ "\"value\":690565856.0000000,"
+ "\"tags\":{\"host\":\"test-host TAG1=VALUE1 TAG2=VALUE2\"}}");
+}
+
+static void test_exporting_discard_response(void **state)
+{
+ struct engine *engine = *state;
+
+ BUFFER *response = buffer_create(0);
+ buffer_sprintf(response, "Test response");
+
+ assert_int_equal(exporting_discard_response(response, engine->instance_root), 0);
+ assert_int_equal(buffer_strlen(response), 0);
+
+ buffer_free(response);
+}
+
+static void test_simple_connector_receive_response(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ int sock = 1;
+
+ expect_function_call(__wrap_recv);
+ expect_value(__wrap_recv, sockfd, 1);
+ expect_not_value(__wrap_recv, buf, 0);
+ expect_value(__wrap_recv, len, 4096);
+ expect_value(__wrap_recv, flags, MSG_DONTWAIT);
+
+ simple_connector_receive_response(&sock, instance);
+
+ assert_int_equal(stats->received_bytes, 9);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(sock, 1);
+}
+
+static void test_simple_connector_send_buffer(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ int sock = 1;
+ int failures = 3;
+ size_t buffered_metrics = 1;
+ BUFFER *header = buffer_create(0);
+ BUFFER *buffer = buffer_create(0);
+ buffer_strcat(header, "test header\n");
+ buffer_strcat(buffer, "test buffer\n");
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 1);
+ expect_value(__wrap_send, buf, buffer_tostring(header));
+ expect_string(__wrap_send, buf, "test header\n");
+ expect_value(__wrap_send, len, 12);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 1);
+ expect_value(__wrap_send, buf, buffer_tostring(buffer));
+ expect_string(__wrap_send, buf, "test buffer\n");
+ expect_value(__wrap_send, len, 12);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ simple_connector_send_buffer(&sock, &failures, instance, header, buffer, buffered_metrics);
+
+ assert_int_equal(failures, 0);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->sent_bytes, 12);
+ assert_int_equal(stats->sent_metrics, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+
+ assert_int_equal(buffer_strlen(buffer), 0);
+
+ assert_int_equal(sock, 1);
+}
+
+static void test_simple_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ __real_mark_scheduled_instances(engine);
+
+ struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
+ simple_connector_data->first_buffer = simple_connector_data->last_buffer;
+ simple_connector_data->header = buffer_create(0);
+ simple_connector_data->buffer = buffer_create(0);
+ simple_connector_data->last_buffer->header = buffer_create(0);
+ simple_connector_data->last_buffer->buffer = buffer_create(0);
+
+ buffer_sprintf(simple_connector_data->last_buffer->header, "test header");
+ buffer_sprintf(simple_connector_data->last_buffer->buffer, "test buffer");
+
+ expect_function_call(__wrap_connect_to_one_of);
+ expect_string(__wrap_connect_to_one_of, destination, "localhost");
+ expect_value(__wrap_connect_to_one_of, default_port, 2003);
+ expect_not_value(__wrap_connect_to_one_of, reconnects_counter, 0);
+ expect_value(__wrap_connect_to_one_of, connected_to, 0);
+ expect_value(__wrap_connect_to_one_of, connected_to_size, 0);
+ will_return(__wrap_connect_to_one_of, 2);
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 2);
+ expect_not_value(__wrap_send, buf, buffer_tostring(simple_connector_data->last_buffer->buffer));
+ expect_string(__wrap_send, buf, "test header");
+ expect_value(__wrap_send, len, 11);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ expect_function_call(__wrap_send);
+ expect_value(__wrap_send, sockfd, 2);
+ expect_value(__wrap_send, buf, buffer_tostring(simple_connector_data->last_buffer->buffer));
+ expect_string(__wrap_send, buf, "test buffer");
+ expect_value(__wrap_send, len, 11);
+ expect_value(__wrap_send, flags, MSG_NOSIGNAL);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ simple_connector_worker(instance);
+
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 0);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 0);
+ assert_int_equal(stats->sent_metrics, 0);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 0);
+ assert_int_equal(stats->transmission_successes, 0);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+}
+
+static void test_sanitize_json_string(void **state)
+{
+ (void)state;
+
+ char *src = "check \t\\\" string";
+ char dst[19 + 1];
+
+ sanitize_json_string(dst, src, 19);
+
+ assert_string_equal(dst, "check _\\\\\\\" string");
+}
+
+static void test_sanitize_graphite_label_value(void **state)
+{
+ (void)state;
+
+ char *src = "check ;~ string";
+ char dst[15 + 1];
+
+ sanitize_graphite_label_value(dst, src, 15);
+
+ assert_string_equal(dst, "check____string");
+}
+
+static void test_sanitize_opentsdb_label_value(void **state)
+{
+ (void)state;
+
+ char *src = "check \t\\\" #&$? -_./ string";
+ char dst[26 + 1];
+
+ sanitize_opentsdb_label_value(dst, src, 26);
+
+ assert_string_equal(dst, "check__________-_./_string");
+}
+
+static void test_format_host_labels_json_plaintext(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_json_plaintext(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), "\"labels\":{\"key1\":\"value1\",\"key2\":\"value2\"},");
+}
+
+static void test_format_host_labels_graphite_plaintext(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_graphite_plaintext(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), ";key1=value1;key2=value2");
+}
+
+static void test_format_host_labels_opentsdb_telnet(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_opentsdb_telnet(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), " key1=value1 key2=value2");
+}
+
+static void test_format_host_labels_opentsdb_http(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ assert_int_equal(format_host_labels_opentsdb_http(instance, localhost), 0);
+ assert_string_equal(buffer_tostring(instance->labels), ",\"key1\":\"value1\",\"key2\":\"value2\"");
+}
+
+static void test_flush_host_labels(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->labels = buffer_create(12);
+ buffer_strcat(instance->labels, "check string");
+ assert_int_equal(buffer_strlen(instance->labels), 12);
+
+ assert_int_equal(flush_host_labels(instance, localhost), 0);
+ assert_int_equal(buffer_strlen(instance->labels), 0);
+}
+
+static void test_create_main_rusage_chart(void **state)
+{
+ UNUSED(state);
+
+ RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
+ RRDDIM *rd_user = NULL;
+ RRDDIM *rd_system = NULL;
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_main_thread_cpu");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting");
+ expect_string(rrdset_create_custom, context, "exporting_cpu_usage");
+ expect_string(rrdset_create_custom, units, "milliseconds/s");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130600);
+ expect_value(rrdset_create_custom, update_every, localhost->rrd_update_every);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
+ will_return(rrdset_create_custom, st_rusage);
+
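+ // Two dimensions are expected below, one for user and one for system CPU time (rd_user and rd_system); the divisor of 1000 presumably converts microsecond rusage values to the chart's milliseconds.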
+ expect_function_calls(rrddim_add_custom, 2);
+ expect_value_count(rrddim_add_custom, st, st_rusage, 2);
+ expect_value_count(rrddim_add_custom, name, NULL, 2);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 2);
+ expect_value_count(rrddim_add_custom, divisor, 1000, 2);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
+
+ __real_create_main_rusage_chart(&st_rusage, &rd_user, &rd_system);
+
+ free(st_rusage);
+}
+
+static void test_send_main_rusage(void **state)
+{
+ UNUSED(state);
+
+ RRDSET *st_rusage = calloc(1, sizeof(RRDSET));
+ st_rusage->counter_done = 1;
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, st_rusage);
+
+ expect_function_calls(rrddim_set_by_pointer, 2);
+ expect_value_count(rrddim_set_by_pointer, st, st_rusage, 2);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, st_rusage);
+
+ __real_send_main_rusage(st_rusage, NULL, NULL);
+
+ free(st_rusage);
+}
+
+static void test_send_internal_metrics(void **state)
+{
+ UNUSED(state);
+
+ struct instance *instance = calloc(1, sizeof(struct instance));
+ instance->config.name = (const char *)strdupz("test_instance");
+ instance->config.update_every = 2;
+
+ struct stats *stats = &instance->stats;
+
+ stats->st_metrics = calloc(1, sizeof(RRDSET));
+ stats->st_metrics->counter_done = 1;
+ stats->st_bytes = calloc(1, sizeof(RRDSET));
+ stats->st_bytes->counter_done = 1;
+ stats->st_ops = calloc(1, sizeof(RRDSET));
+ stats->st_ops->counter_done = 1;
+ stats->st_rusage = calloc(1, sizeof(RRDSET));
+ stats->st_rusage->counter_done = 1;
+
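+ // The expectations below cover the four per-instance charts that send_internal_metrics drives (buffered metrics, data size, operations, thread CPU), followed by one update of each chart.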
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_metrics");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_buffer");
+ expect_string(rrdset_create_custom, units, "metrics");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130610);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
+ will_return(rrdset_create_custom, stats->st_metrics);
+
+ expect_function_calls(rrddim_add_custom, 3);
+ expect_value_count(rrddim_add_custom, st, stats->st_metrics, 3);
+ expect_value_count(rrddim_add_custom, name, NULL, 3);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 3);
+ expect_value_count(rrddim_add_custom, divisor, 1, 3);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 3);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_bytes");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_data_size");
+ expect_string(rrdset_create_custom, units, "KiB");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130620);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_AREA);
+ will_return(rrdset_create_custom, stats->st_bytes);
+
+ expect_function_calls(rrddim_add_custom, 4);
+ expect_value_count(rrddim_add_custom, st, stats->st_bytes, 4);
+ expect_value_count(rrddim_add_custom, name, NULL, 4);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 4);
+ expect_value_count(rrddim_add_custom, divisor, 1024, 4);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 4);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_ops");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_operations");
+ expect_string(rrdset_create_custom, units, "operations");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130630);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_LINE);
+ will_return(rrdset_create_custom, stats->st_ops);
+
+ expect_function_calls(rrddim_add_custom, 5);
+ expect_value_count(rrddim_add_custom, st, stats->st_ops, 5);
+ expect_value_count(rrddim_add_custom, name, NULL, 5);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 5);
+ expect_value_count(rrddim_add_custom, divisor, 1, 5);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_ABSOLUTE, 5);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_create_custom);
+ expect_value(rrdset_create_custom, host, localhost);
+ expect_string(rrdset_create_custom, type, "netdata");
+ expect_string(rrdset_create_custom, id, "exporting_test_instance_thread_cpu");
+ expect_value(rrdset_create_custom, name, NULL);
+ expect_string(rrdset_create_custom, family, "exporting_test_instance");
+ expect_string(rrdset_create_custom, context, "exporting_instance");
+ expect_string(rrdset_create_custom, units, "milliseconds/s");
+ expect_string(rrdset_create_custom, plugin, "exporting");
+ expect_value(rrdset_create_custom, module, NULL);
+ expect_value(rrdset_create_custom, priority, 130640);
+ expect_value(rrdset_create_custom, update_every, 2);
+ expect_value(rrdset_create_custom, chart_type, RRDSET_TYPE_STACKED);
+ will_return(rrdset_create_custom, stats->st_rusage);
+
+ expect_function_calls(rrddim_add_custom, 2);
+ expect_value_count(rrddim_add_custom, st, stats->st_rusage, 2);
+ expect_value_count(rrddim_add_custom, name, NULL, 2);
+ expect_value_count(rrddim_add_custom, multiplier, 1, 2);
+ expect_value_count(rrddim_add_custom, divisor, 1000, 2);
+ expect_value_count(rrddim_add_custom, algorithm, RRD_ALGORITHM_INCREMENTAL, 2);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_metrics);
+
+ expect_function_calls(rrddim_set_by_pointer, 3);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_metrics, 3);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_metrics);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_bytes);
+
+ expect_function_calls(rrddim_set_by_pointer, 4);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_bytes, 4);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_bytes);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_ops);
+
+ expect_function_calls(rrddim_set_by_pointer, 5);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_ops, 5);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_ops);
+
+ // ------------------------------------------------------------------------
+
+ expect_function_call(rrdset_next_usec);
+ expect_value(rrdset_next_usec, st, stats->st_rusage);
+
+ expect_function_calls(rrddim_set_by_pointer, 2);
+ expect_value_count(rrddim_set_by_pointer, st, stats->st_rusage, 2);
+
+ expect_function_call(rrdset_done);
+ expect_value(rrdset_done, st, stats->st_rusage);
+
+ // ------------------------------------------------------------------------
+
+ __real_send_internal_metrics(instance);
+
+ free(stats->st_metrics);
+ free(stats->st_bytes);
+ free(stats->st_ops);
+ free(stats->st_rusage);
+ free((void *)instance->config.name);
+ free(instance);
+}
+
+static void test_can_send_rrdset(void **state)
+{
+ (void)*state;
+
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 1);
+
+ rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+ rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_BACKEND_IGNORE);
+
+ // TODO: test with a denying simple pattern
+
+ rrdset_flag_set(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+ rrdset_flag_clear(localhost->rrdset_root, RRDSET_FLAG_OBSOLETE);
+
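+ // A chart stored with RRD_MEMORY_MODE_NONE keeps no history, so it must not be exportable when a stored-data source such as "average" is configured.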
+ localhost->rrdset_root->rrd_memory_mode = RRD_MEMORY_MODE_NONE;
+ prometheus_exporter_instance->config.options |= EXPORTING_SOURCE_DATA_AVERAGE;
+ assert_int_equal(can_send_rrdset(prometheus_exporter_instance, localhost->rrdset_root), 0);
+}
+
+static void test_prometheus_name_copy(void **state)
+{
+ (void)*state;
+
+ char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+ assert_int_equal(prometheus_name_copy(destination_name, "test-name", PROMETHEUS_ELEMENT_MAX), 9);
+
+ assert_string_equal(destination_name, "test_name");
+}
+
+static void test_prometheus_label_copy(void **state)
+{
+ (void)*state;
+
+ char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+ assert_int_equal(prometheus_label_copy(destination_name, "test\"\\\nlabel", PROMETHEUS_ELEMENT_MAX), 15);
+
+ assert_string_equal(destination_name, "test\\\"\\\\\\\nlabel");
+}
+
+static void test_prometheus_units_copy(void **state)
+{
+ (void)*state;
+
+ char destination_name[PROMETHEUS_ELEMENT_MAX + 1];
+ assert_string_equal(prometheus_units_copy(destination_name, "test-units", PROMETHEUS_ELEMENT_MAX, 0), "_test_units");
+ assert_string_equal(destination_name, "_test_units");
+
+ assert_string_equal(prometheus_units_copy(destination_name, "%", PROMETHEUS_ELEMENT_MAX, 0), "_percent");
+ assert_string_equal(prometheus_units_copy(destination_name, "test-units/s", PROMETHEUS_ELEMENT_MAX, 0), "_test_units_persec");
+
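+ // The final argument appears to toggle legacy unit names, mapping binary units such as "KiB" to "KB".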
+ assert_string_equal(prometheus_units_copy(destination_name, "KiB", PROMETHEUS_ELEMENT_MAX, 1), "_KB");
+}
+
+static void test_format_host_labels_prometheus(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ format_host_labels_prometheus(instance, localhost);
+ assert_string_equal(buffer_tostring(instance->labels), "key1=\"netdata\",key2=\"value2\"");
+}
+
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
+{
+ (void)state;
+
+ BUFFER *buffer = buffer_create(0);
+
+ localhost->hostname = strdupz("test_hostname");
+ localhost->rrdset_root->family = strdupz("test_family");
+ localhost->rrdset_root->context = strdupz("test_context");
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
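+ // The mocked calculation returns the packed storage number itself (0x292932e0 == 690565856), which is why that raw value, rather than 27, appears in the expected output below.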
+ rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(localhost, buffer, "test_server", "test_prefix", 0, 0);
+
+ assert_string_equal(
+ buffer_tostring(buffer),
+ "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+ "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
+ "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
+ "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\"} 690565856.0000000\n");
+
+ buffer_flush(buffer);
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ localhost, buffer, "test_server", "test_prefix", 0, PROMETHEUS_OUTPUT_NAMES | PROMETHEUS_OUTPUT_TYPES);
+
+ assert_string_equal(
+ buffer_tostring(buffer),
+ "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+ "netdata_host_tags_info{key1=\"value1\",key2=\"value2\"} 1\n"
+ "netdata_host_tags{key1=\"value1\",key2=\"value2\"} 1\n"
+ "# TYPE test_prefix_test_context gauge\n"
+ "test_prefix_test_context{chart=\"chart_name\",family=\"test_family\",dimension=\"dimension_name\"} 690565856.0000000\n");
+
+ buffer_flush(buffer);
+
+ expect_function_call(__wrap_now_realtime_sec);
+ will_return(__wrap_now_realtime_sec, 2);
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(localhost, buffer, "test_server", "test_prefix", 0, 0);
+
+ assert_string_equal(
+ buffer_tostring(buffer),
+ "netdata_info{instance=\"test_hostname\",application=\"(null)\",version=\"(null)\"} 1\n"
+ "netdata_host_tags_info{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
+ "netdata_host_tags{instance=\"test_hostname\",key1=\"value1\",key2=\"value2\"} 1\n"
+ "test_prefix_test_context{chart=\"chart_id\",family=\"test_family\",dimension=\"dimension_id\",instance=\"test_hostname\"} 690565856.0000000\n");
+
+ free(localhost->rrdset_root->context);
+ free(localhost->rrdset_root->family);
+ free(localhost->hostname);
+ buffer_free(buffer);
+}
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+static void test_init_prometheus_remote_write_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ expect_function_call(__wrap_init_write_request);
+ will_return(__wrap_init_write_request, 0xff);
+
+ assert_int_equal(init_prometheus_remote_write_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, simple_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_prometheus_remote_write);
+ assert_ptr_equal(instance->start_chart_formatting, format_chart_prometheus_remote_write);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_prometheus_remote_write);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, NULL);
+ assert_ptr_equal(instance->end_batch_formatting, format_batch_prometheus_remote_write);
+ assert_ptr_equal(instance->prepare_header, prometheus_remote_write_prepare_header);
+ assert_ptr_equal(instance->check_response, process_prometheus_remote_write_response);
+
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)instance->connector_specific_data;
+
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+ assert_ptr_not_equal(connector_specific_data->write_request, NULL);
+ freez(instance->connector_specific_data);
+}
+
+static void test_prometheus_remote_write_prepare_header(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct prometheus_remote_write_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->remote_write_path = strdupz("/receive");
+
+ struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
+ simple_connector_data->last_buffer->header = buffer_create(0);
+ simple_connector_data->last_buffer->buffer = buffer_create(0);
+
+ buffer_sprintf(simple_connector_data->last_buffer->buffer, "test buffer");
+
+ prometheus_remote_write_prepare_header(instance);
+
+ assert_string_equal(
+ buffer_tostring(simple_connector_data->last_buffer->header),
+ "POST /receive HTTP/1.1\r\n"
+ "Host: localhost\r\n"
+ "Accept: */*\r\n"
+ "Content-Encoding: snappy\r\n"
+ "Content-Type: application/x-protobuf\r\n"
+ "X-Prometheus-Remote-Write-Version: 0.1.0\r\n"
+ "Content-Length: 11\r\n"
+ "\r\n");
+
+ free(connector_specific_config->remote_write_path);
+
+ buffer_free(simple_connector_data->last_buffer->header);
+ buffer_free(simple_connector_data->last_buffer->buffer);
+}
+
+static void test_process_prometheus_remote_write_response(void **state)
+{
+ (void)state;
+ BUFFER *buffer = buffer_create(0);
+
+ buffer_sprintf(buffer, "HTTP/1.1 200 OK\r\n");
+ assert_int_equal(process_prometheus_remote_write_response(buffer, NULL), 0);
+
+ buffer_free(buffer);
+}
+
+static void test_format_host_prometheus_remote_write(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options |= EXPORTING_OPTION_SEND_CONFIGURED_LABELS;
+ instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
+
+ struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ mallocz(sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_data->write_request = (void *)0xff;
+
+ localhost->program_name = strdupz("test_program");
+ localhost->program_version = strdupz("test_version");
+
+ expect_function_call(__wrap_add_host_info);
+ expect_value(__wrap_add_host_info, write_request_p, 0xff);
+ expect_string(__wrap_add_host_info, name, "netdata_info");
+ expect_string(__wrap_add_host_info, instance, "test-host");
+ expect_string(__wrap_add_host_info, application, "test_program");
+ expect_string(__wrap_add_host_info, version, "test_version");
+ expect_in_range(
+ __wrap_add_host_info, timestamp, now_realtime_usec() / USEC_PER_MS - 1000, now_realtime_usec() / USEC_PER_MS);
+
+ expect_function_call(__wrap_add_label);
+ expect_value(__wrap_add_label, write_request_p, 0xff);
+ expect_string(__wrap_add_label, key, "key1");
+ expect_string(__wrap_add_label, value, "value1");
+
+ expect_function_call(__wrap_add_label);
+ expect_value(__wrap_add_label, write_request_p, 0xff);
+ expect_string(__wrap_add_label, key, "key2");
+ expect_string(__wrap_add_label, value, "value2");
+
+ assert_int_equal(format_host_prometheus_remote_write(instance, localhost), 0);
+
+ freez(connector_specific_data);
+ freez(simple_connector_data);
+ free(localhost->program_name);
+ free(localhost->program_version);
+}
+
+static void test_format_dimension_prometheus_remote_write(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ mallocz(sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_data->write_request = (void *)0xff;
+
+ RRDDIM *rd = localhost->rrdset_root->dimensions;
+
+ expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+
+ expect_function_call(__wrap_add_metric);
+ expect_value(__wrap_add_metric, write_request_p, 0xff);
+ expect_string(__wrap_add_metric, name, "netdata_");
+ expect_string(__wrap_add_metric, chart, "");
+ expect_string(__wrap_add_metric, family, "");
+ expect_string(__wrap_add_metric, dimension, "dimension_name");
+ expect_string(__wrap_add_metric, instance, "test-host");
+ expect_value(__wrap_add_metric, value, 0x292932e0);
+ expect_value(__wrap_add_metric, timestamp, 15052 * MSEC_PER_SEC);
+
+ assert_int_equal(format_dimension_prometheus_remote_write(instance, rd), 0);
+}
+
+static void test_format_batch_prometheus_remote_write(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct simple_connector_data *simple_connector_data = mallocz(sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ mallocz(sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_data->write_request = __real_init_write_request();
+
+ expect_function_call(__wrap_simple_connector_end_batch);
+ expect_value(__wrap_simple_connector_end_batch, instance, instance);
+ will_return(__wrap_simple_connector_end_batch, 0);
+ __real_add_host_info(
+ connector_specific_data->write_request,
+ "test_name", "test_instance", "test_application", "test_version", 15051);
+
+ __real_add_label(connector_specific_data->write_request, "test_key", "test_value");
+
+ __real_add_metric(
+ connector_specific_data->write_request,
+ "test_name", "test chart", "test_family", "test_dimension", "test_instance",
+ 123000321, 15052);
+
+ assert_int_equal(format_batch_prometheus_remote_write(instance), 0);
+
+ BUFFER *buffer = instance->buffer;
+ assert_int_equal(buffer_strlen(buffer), 192);
+
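+ // Octal-escape every byte so the binary payload (presumably a snappy-compressed protobuf write request) can be compared as a plain string.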
+ BUFFER *escaped_buffer = buffer_create(850);
+ size_t len = buffer_strlen(buffer);
+ char *ch = (char *)buffer_tostring(buffer);
+ for (; len > 0; ch++, len--)
+ buffer_sprintf(escaped_buffer, "\\%03o", (unsigned int)*ch);
+ assert_string_equal(
+ buffer_tostring(escaped_buffer),
+ "\\37777777641\\002\\120\\012\\37777777622\\001\\012\\025\\012\\010\\137\\137\\156\\141\\155\\145\\137\\137"
+ "\\022\\011\\164\\145\\163\\164\\005\\015\\064\\012\\031\\012\\010\\151\\156\\163\\164\\141\\156\\143\\145\\022"
+ "\\015\\005\\027\\021\\017\\100\\012\\037\\012\\013\\141\\160\\160\\154\\151\\143\\141\\164\\151\\157\\156\\022"
+ "\\020\\005\\036\\035\\022\\034\\012\\027\\012\\007\\166\\145\\162\\163\\001\\035\\000\\014\\005\\035\\015\\016"
+ "\\014\\012\\026\\012\\010\\005\\020\\020\\153\\145\\171\\022\\012\\005\\012\\040\\166\\141\\154\\165\\145\\022"
+ "\\014\\011\\000\\005\\001\\030\\37777777760\\077\\020\\37777777713\\165\\012\\37777777611\\142\\37777777625"
+ "\\000\\034\\023\\012\\005\\143\\150\\141\\162\\164\\011\\075\\000\\040\\005\\014\\054\\012\\025\\012\\006\\146"
+ "\\141\\155\\151\\154\\171\\022\\013\\005\\123\\011\\015\\040\\012\\033\\012\\011\\144\\151\\155\\145\\156\\005"
+ "\\37777777607\\000\\016\\005\\032\\025\\020\\000\\012\\146\\37777777736\\000\\064\\022\\014\\011\\000\\000\\000"
+ "\\004\\130\\123\\37777777635\\101\\020\\37777777714\\165");
+
+ buffer_free(escaped_buffer);
+ protocol_buffers_shutdown();
+}
+#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
+
+#if HAVE_KINESIS
+static void test_init_aws_kinesis_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ struct aws_kinesis_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct aws_kinesis_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->stream_name = strdupz("test_stream");
+ connector_specific_config->auth_key_id = strdupz("test_auth_key_id");
+ connector_specific_config->secure_key = strdupz("test_secure_key");
+
+ expect_function_call(__wrap_aws_sdk_init);
+ expect_function_call(__wrap_kinesis_init);
+ expect_not_value(__wrap_kinesis_init, kinesis_specific_data_p, NULL);
+ expect_string(__wrap_kinesis_init, region, "localhost");
+ expect_string(__wrap_kinesis_init, access_key_id, "test_auth_key_id");
+ expect_string(__wrap_kinesis_init, secret_key, "test_secure_key");
+ expect_value(__wrap_kinesis_init, timeout, 10000);
+
+ assert_int_equal(init_aws_kinesis_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, aws_kinesis_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+ assert_ptr_equal(instance->end_batch_formatting, NULL);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+ freez(instance->connector_specific_data);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+
+ expect_function_call(__wrap_kinesis_init);
+ expect_not_value(__wrap_kinesis_init, kinesis_specific_data_p, NULL);
+ expect_string(__wrap_kinesis_init, region, "localhost");
+ expect_string(__wrap_kinesis_init, access_key_id, "test_auth_key_id");
+ expect_string(__wrap_kinesis_init, secret_key, "test_secure_key");
+ expect_value(__wrap_kinesis_init, timeout, 10000);
+
+ assert_int_equal(init_aws_kinesis_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
+
+ free(connector_specific_config->stream_name);
+ free(connector_specific_config->auth_key_id);
+ free(connector_specific_config->secure_key);
+}
+
+static void test_aws_kinesis_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+ BUFFER *buffer = instance->buffer;
+
+ __real_mark_scheduled_instances(engine);
+
+ expect_function_call(__wrap_rrdhost_is_exportable);
+ expect_value(__wrap_rrdhost_is_exportable, instance, instance);
+ expect_value(__wrap_rrdhost_is_exportable, host, localhost);
+ will_return(__wrap_rrdhost_is_exportable, 1);
+
+ RRDSET *st = localhost->rrdset_root;
+ expect_function_call(__wrap_rrdset_is_exportable);
+ expect_value(__wrap_rrdset_is_exportable, instance, instance);
+ expect_value(__wrap_rrdset_is_exportable, st, st);
+ will_return(__wrap_rrdset_is_exportable, 1);
+
+ expect_function_call(__wrap_simple_connector_end_batch);
+ expect_value(__wrap_simple_connector_end_batch, instance, instance);
+ will_return(__wrap_simple_connector_end_batch, 0);
+ __real_prepare_buffers(engine);
+
+ struct aws_kinesis_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct aws_kinesis_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->stream_name = strdupz("test_stream");
+ connector_specific_config->auth_key_id = strdupz("test_auth_key_id");
+ connector_specific_config->secure_key = strdupz("test_secure_key");
+
+ struct aws_kinesis_specific_data *connector_specific_data = callocz(1, sizeof(struct aws_kinesis_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ expect_function_call(__wrap_kinesis_put_record);
+ expect_not_value(__wrap_kinesis_put_record, kinesis_specific_data_p, NULL);
+ expect_string(__wrap_kinesis_put_record, stream_name, "test_stream");
+ expect_string(__wrap_kinesis_put_record, partition_key, "netdata_0");
+ expect_value(__wrap_kinesis_put_record, data, buffer_tostring(buffer));
+ // The buffer was prepared by the Graphite exporting connector
+ expect_string(
+ __wrap_kinesis_put_record, data,
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
+ expect_value(__wrap_kinesis_put_record, data_len, 84);
+
+ expect_function_call(__wrap_kinesis_get_result);
+ expect_value(__wrap_kinesis_get_result, request_outcomes_p, NULL);
+ expect_not_value(__wrap_kinesis_get_result, error_message, NULL);
+ expect_not_value(__wrap_kinesis_get_result, sent_bytes, NULL);
+ expect_not_value(__wrap_kinesis_get_result, lost_bytes, NULL);
+ will_return(__wrap_kinesis_get_result, 0);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ aws_kinesis_connector_worker(instance);
+
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 84);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 84);
+ assert_int_equal(stats->sent_metrics, 1);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+
+ free(connector_specific_config->stream_name);
+ free(connector_specific_config->auth_key_id);
+ free(connector_specific_config->secure_key);
+}
+#endif // HAVE_KINESIS
+
+#if ENABLE_EXPORTING_PUBSUB
+static void test_init_pubsub_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ struct pubsub_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct pubsub_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->credentials_file = strdupz("/test/credentials/file");
+ connector_specific_config->project_id = strdupz("test_project_id");
+ connector_specific_config->topic_id = strdupz("test_topic_id");
+
+ expect_function_call(__wrap_pubsub_init);
+ expect_not_value(__wrap_pubsub_init, pubsub_specific_data_p, NULL);
+ expect_string(__wrap_pubsub_init, destination, "localhost");
+ expect_string(__wrap_pubsub_init, error_message, "");
+ expect_string(__wrap_pubsub_init, credentials_file, "/test/credentials/file");
+ expect_string(__wrap_pubsub_init, project_id, "test_project_id");
+ expect_string(__wrap_pubsub_init, topic_id, "test_topic_id");
+ will_return(__wrap_pubsub_init, 0);
+
+ assert_int_equal(init_pubsub_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, pubsub_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+ assert_ptr_equal(instance->end_batch_formatting, NULL);
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+ freez(instance->connector_specific_data);
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
+
+ expect_function_call(__wrap_pubsub_init);
+ expect_not_value(__wrap_pubsub_init, pubsub_specific_data_p, NULL);
+ expect_string(__wrap_pubsub_init, destination, "localhost");
+ expect_string(__wrap_pubsub_init, error_message, "");
+ expect_string(__wrap_pubsub_init, credentials_file, "/test/credentials/file");
+ expect_string(__wrap_pubsub_init, project_id, "test_project_id");
+ expect_string(__wrap_pubsub_init, topic_id, "test_topic_id");
+ will_return(__wrap_pubsub_init, 0);
+
+ assert_int_equal(init_pubsub_instance(instance), 0);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_stored_json_plaintext);
+
+ free(connector_specific_config->credentials_file);
+ free(connector_specific_config->project_id);
+ free(connector_specific_config->topic_id);
+}
+
+static void test_pubsub_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ __real_mark_scheduled_instances(engine);
+
+ expect_function_call(__wrap_rrdhost_is_exportable);
+ expect_value(__wrap_rrdhost_is_exportable, instance, instance);
+ expect_value(__wrap_rrdhost_is_exportable, host, localhost);
+ will_return(__wrap_rrdhost_is_exportable, 1);
+
+ RRDSET *st = localhost->rrdset_root;
+ expect_function_call(__wrap_rrdset_is_exportable);
+ expect_value(__wrap_rrdset_is_exportable, instance, instance);
+ expect_value(__wrap_rrdset_is_exportable, st, st);
+ will_return(__wrap_rrdset_is_exportable, 1);
+
+ expect_function_call(__wrap_simple_connector_end_batch);
+ expect_value(__wrap_simple_connector_end_batch, instance, instance);
+ will_return(__wrap_simple_connector_end_batch, 0);
+ __real_prepare_buffers(engine);
+
+ struct pubsub_specific_config *connector_specific_config =
+ callocz(1, sizeof(struct pubsub_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->credentials_file = strdupz("/test/credentials/file");
+ connector_specific_config->project_id = strdupz("test_project_id");
+ connector_specific_config->topic_id = strdupz("test_topic_id");
+
+ struct pubsub_specific_data *connector_specific_data = callocz(1, sizeof(struct pubsub_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ expect_function_call(__wrap_pubsub_add_message);
+ expect_not_value(__wrap_pubsub_add_message, pubsub_specific_data_p, NULL);
+ // The buffer was prepared by the Graphite exporting connector
+ expect_string(
+ __wrap_pubsub_add_message, data,
+ "netdata.test-host.chart_name.dimension_name;TAG1=VALUE1 TAG2=VALUE2 123000321 15051\n");
+ will_return(__wrap_pubsub_add_message, 0);
+
+ expect_function_call(__wrap_pubsub_publish);
+ expect_not_value(__wrap_pubsub_publish, pubsub_specific_data_p, NULL);
+ expect_string(__wrap_pubsub_publish, error_message, "");
+ expect_value(__wrap_pubsub_publish, buffered_metrics, 1);
+ expect_value(__wrap_pubsub_publish, buffered_bytes, 84);
+ will_return(__wrap_pubsub_publish, 0);
+
+ expect_function_call(__wrap_pubsub_get_result);
+ expect_not_value(__wrap_pubsub_get_result, pubsub_specific_data_p, NULL);
+ expect_not_value(__wrap_pubsub_get_result, error_message, NULL);
+ expect_not_value(__wrap_pubsub_get_result, sent_metrics, NULL);
+ expect_not_value(__wrap_pubsub_get_result, sent_bytes, NULL);
+ expect_not_value(__wrap_pubsub_get_result, lost_metrics, NULL);
+ expect_not_value(__wrap_pubsub_get_result, lost_bytes, NULL);
+ will_return(__wrap_pubsub_get_result, 0);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ pubsub_connector_worker(instance);
+
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 84);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 84);
+ assert_int_equal(stats->sent_metrics, 0);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+
+ free(connector_specific_config->credentials_file);
+ free(connector_specific_config->project_id);
+ free(connector_specific_config->topic_id);
+}
+#endif // ENABLE_EXPORTING_PUBSUB
+
+#if HAVE_MONGOC
+static void test_init_mongodb_instance(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ instance->config.options = EXPORTING_SOURCE_DATA_AS_COLLECTED | EXPORTING_OPTION_SEND_NAMES;
+
+ struct mongodb_specific_config *connector_specific_config = callocz(1, sizeof(struct mongodb_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->database = strdupz("test_database");
+ connector_specific_config->collection = strdupz("test_collection");
+ instance->config.buffer_on_failures = 10;
+
+ expect_function_call(__wrap_mongoc_init);
+ expect_function_call(__wrap_mongoc_uri_new_with_error);
+ expect_string(__wrap_mongoc_uri_new_with_error, uri_string, "localhost");
+ expect_not_value(__wrap_mongoc_uri_new_with_error, error, NULL);
+ will_return(__wrap_mongoc_uri_new_with_error, 0xf1);
+
+ expect_function_call(__wrap_mongoc_uri_get_option_as_int32);
+ expect_value(__wrap_mongoc_uri_get_option_as_int32, uri, 0xf1);
+ expect_string(__wrap_mongoc_uri_get_option_as_int32, option, MONGOC_URI_SOCKETTIMEOUTMS);
+ expect_value(__wrap_mongoc_uri_get_option_as_int32, fallback, 1000);
+ will_return(__wrap_mongoc_uri_get_option_as_int32, 1000);
+
+ expect_function_call(__wrap_mongoc_uri_set_option_as_int32);
+ expect_value(__wrap_mongoc_uri_set_option_as_int32, uri, 0xf1);
+ expect_string(__wrap_mongoc_uri_set_option_as_int32, option, MONGOC_URI_SOCKETTIMEOUTMS);
+ expect_value(__wrap_mongoc_uri_set_option_as_int32, value, 1000);
+ will_return(__wrap_mongoc_uri_set_option_as_int32, true);
+
+ expect_function_call(__wrap_mongoc_client_new_from_uri);
+ expect_value(__wrap_mongoc_client_new_from_uri, uri, 0xf1);
+ will_return(__wrap_mongoc_client_new_from_uri, 0xf2);
+
+ expect_function_call(__wrap_mongoc_client_set_appname);
+ expect_value(__wrap_mongoc_client_set_appname, client, 0xf2);
+ expect_string(__wrap_mongoc_client_set_appname, appname, "netdata");
+ will_return(__wrap_mongoc_client_set_appname, true);
+
+ expect_function_call(__wrap_mongoc_client_get_collection);
+ expect_value(__wrap_mongoc_client_get_collection, client, 0xf2);
+ expect_string(__wrap_mongoc_client_get_collection, db, "test_database");
+ expect_string(__wrap_mongoc_client_get_collection, collection, "test_collection");
+ will_return(__wrap_mongoc_client_get_collection, 0xf3);
+
+ expect_function_call(__wrap_mongoc_uri_destroy);
+ expect_value(__wrap_mongoc_uri_destroy, uri, 0xf1);
+
+ assert_int_equal(init_mongodb_instance(instance), 0);
+
+ assert_ptr_equal(instance->worker, mongodb_connector_worker);
+ assert_ptr_equal(instance->start_batch_formatting, NULL);
+ assert_ptr_equal(instance->start_host_formatting, format_host_labels_json_plaintext);
+ assert_ptr_equal(instance->start_chart_formatting, NULL);
+ assert_ptr_equal(instance->metric_formatting, format_dimension_collected_json_plaintext);
+ assert_ptr_equal(instance->end_chart_formatting, NULL);
+ assert_ptr_equal(instance->end_host_formatting, flush_host_labels);
+ assert_ptr_equal(instance->end_batch_formatting, format_batch_mongodb);
+ assert_ptr_equal(instance->prepare_header, NULL);
+ assert_ptr_equal(instance->check_response, NULL);
+
+ assert_ptr_not_equal(instance->buffer, NULL);
+ buffer_free(instance->buffer);
+
+ assert_ptr_not_equal(instance->connector_specific_data, NULL);
+
+ struct mongodb_specific_data *connector_specific_data =
+ (struct mongodb_specific_data *)instance->connector_specific_data;
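+ // Count the buffers in the circular list; if the ring turns out to be longer than buffer_on_failures, the count is zeroed so the assertion below fails.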
+ size_t number_of_buffers = 1;
+ struct bson_buffer *current_buffer = connector_specific_data->first_buffer;
+ while (current_buffer->next != connector_specific_data->first_buffer) {
+ current_buffer = current_buffer->next;
+ number_of_buffers++;
+ if (number_of_buffers == (size_t)(instance->config.buffer_on_failures + 1)) {
+ number_of_buffers = 0;
+ break;
+ }
+ }
+ assert_int_equal(number_of_buffers, 9);
+
+ free(connector_specific_config->database);
+ free(connector_specific_config->collection);
+}
+
+static void test_format_batch_mongodb(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+ struct stats *stats = &instance->stats;
+
+ struct mongodb_specific_data *connector_specific_data = mallocz(sizeof(struct mongodb_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+
+ struct bson_buffer *current_buffer = callocz(1, sizeof(struct bson_buffer));
+ connector_specific_data->first_buffer = current_buffer;
+ connector_specific_data->first_buffer->next = current_buffer;
+ connector_specific_data->last_buffer = current_buffer;
+
+ BUFFER *buffer = buffer_create(0);
+ buffer_sprintf(buffer, "{ \"metric\": \"test_metric\" }\n");
+ instance->buffer = buffer;
+ stats->buffered_metrics = 1;
+
+ assert_int_equal(format_batch_mongodb(instance), 0);
+
+ assert_int_equal(connector_specific_data->last_buffer->documents_inserted, 1);
+ assert_int_equal(buffer_strlen(buffer), 0);
+
+ size_t len;
+ char *str = bson_as_canonical_extended_json(connector_specific_data->last_buffer->insert[0], &len);
+ assert_string_equal(str, "{ \"metric\" : \"test_metric\" }");
+
+ freez(str);
+ buffer_free(buffer);
+}
+
+static void test_mongodb_connector_worker(void **state)
+{
+ struct engine *engine = *state;
+ struct instance *instance = engine->instance_root;
+
+ struct mongodb_specific_config *connector_specific_config = callocz(1, sizeof(struct mongodb_specific_config));
+ instance->config.connector_specific_config = connector_specific_config;
+ connector_specific_config->database = strdupz("test_database");
+
+ struct mongodb_specific_data *connector_specific_data = callocz(1, sizeof(struct mongodb_specific_data));
+ instance->connector_specific_data = (void *)connector_specific_data;
+ connector_specific_config->collection = strdupz("test_collection");
+
+ struct bson_buffer *buffer = callocz(1, sizeof(struct bson_buffer));
+ buffer->documents_inserted = 1;
+ connector_specific_data->first_buffer = buffer;
+ connector_specific_data->first_buffer->next = buffer;
+
+ connector_specific_data->first_buffer->insert = callocz(1, sizeof(bson_t *));
+ bson_error_t bson_error;
+ connector_specific_data->first_buffer->insert[0] =
+ bson_new_from_json((const uint8_t *)"{ \"test_key\" : \"test_value\" }", -1, &bson_error);
+
+ connector_specific_data->client = mongoc_client_new("mongodb://localhost");
+ connector_specific_data->collection =
+ __real_mongoc_client_get_collection(connector_specific_data->client, "test_database", "test_collection");
+
+ expect_function_call(__wrap_mongoc_collection_insert_many);
+ expect_value(__wrap_mongoc_collection_insert_many, collection, connector_specific_data->collection);
+ expect_value(__wrap_mongoc_collection_insert_many, documents, connector_specific_data->first_buffer->insert);
+ expect_value(__wrap_mongoc_collection_insert_many, n_documents, 1);
+ expect_value(__wrap_mongoc_collection_insert_many, opts, NULL);
+ expect_value(__wrap_mongoc_collection_insert_many, reply, NULL);
+ expect_not_value(__wrap_mongoc_collection_insert_many, error, NULL);
+ will_return(__wrap_mongoc_collection_insert_many, true);
+
+ expect_function_call(__wrap_send_internal_metrics);
+ expect_value(__wrap_send_internal_metrics, instance, instance);
+ will_return(__wrap_send_internal_metrics, 0);
+
+ mongodb_connector_worker(instance);
+
+ assert_ptr_equal(connector_specific_data->first_buffer->insert, NULL);
+ assert_int_equal(connector_specific_data->first_buffer->documents_inserted, 0);
+ assert_ptr_equal(connector_specific_data->first_buffer, connector_specific_data->first_buffer->next);
+
+ struct stats *stats = &instance->stats;
+ assert_int_equal(stats->buffered_metrics, 0);
+ assert_int_equal(stats->buffered_bytes, 0);
+ assert_int_equal(stats->received_bytes, 0);
+ assert_int_equal(stats->sent_bytes, 30);
+ assert_int_equal(stats->sent_metrics, 1);
+ assert_int_equal(stats->lost_metrics, 0);
+ assert_int_equal(stats->receptions, 1);
+ assert_int_equal(stats->transmission_successes, 1);
+ assert_int_equal(stats->transmission_failures, 0);
+ assert_int_equal(stats->data_lost_events, 0);
+ assert_int_equal(stats->lost_bytes, 0);
+ assert_int_equal(stats->reconnects, 0);
+
+ free(connector_specific_config->database);
+ free(connector_specific_config->collection);
+}
+#endif // HAVE_MONGOC
+
+int main(void)
+{
+ const struct CMUnitTest tests[] = {
+ cmocka_unit_test_setup_teardown(test_exporting_engine, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test(test_read_exporting_config),
+ cmocka_unit_test_setup_teardown(test_init_connectors, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_graphite_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_json_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_opentsdb_telnet_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_init_opentsdb_http_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_mark_scheduled_instances, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_rrdhost_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_false_rrdhost_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_rrdset_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_false_rrdset_is_exportable, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_exporting_calculate_value_from_stored_data, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(test_prepare_buffers, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test(test_exporting_name_copy),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_collected_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_stored_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_exporting_discard_response, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_simple_connector_receive_response, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_simple_connector_send_buffer, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_simple_connector_worker, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ const struct CMUnitTest label_tests[] = {
+ cmocka_unit_test(test_sanitize_json_string),
+ cmocka_unit_test(test_sanitize_graphite_label_value),
+ cmocka_unit_test(test_sanitize_opentsdb_label_value),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_json_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_graphite_plaintext, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_opentsdb_telnet, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_opentsdb_http, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(test_flush_host_labels, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ int test_res = cmocka_run_group_tests_name("exporting_engine", tests, NULL, NULL) +
+ cmocka_run_group_tests_name("labels_in_exporting_engine", label_tests, NULL, NULL);
+
+ const struct CMUnitTest internal_metrics_tests[] = {
+ cmocka_unit_test_setup_teardown(test_create_main_rusage_chart, setup_rrdhost, teardown_rrdhost),
+ cmocka_unit_test(test_send_main_rusage),
+ cmocka_unit_test(test_send_internal_metrics),
+ };
+
+ test_res += cmocka_run_group_tests_name("internal_metrics", internal_metrics_tests, NULL, NULL);
+
+ const struct CMUnitTest prometheus_web_api_tests[] = {
+ cmocka_unit_test_setup_teardown(test_can_send_rrdset, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(test_prometheus_name_copy, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(test_prometheus_label_copy, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(test_prometheus_units_copy, setup_prometheus, teardown_prometheus),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_labels_prometheus, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ rrd_stats_api_v1_charts_allmetrics_prometheus, setup_prometheus, teardown_prometheus),
+ };
+
+ test_res += cmocka_run_group_tests_name("prometheus_web_api", prometheus_web_api_tests, NULL, NULL);
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+ const struct CMUnitTest prometheus_remote_write_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_prometheus_remote_write_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_prometheus_remote_write_prepare_header, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test(test_process_prometheus_remote_write_response),
+ cmocka_unit_test_setup_teardown(
+ test_format_host_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_dimension_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_batch_prometheus_remote_write, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name(
+ "prometheus_remote_write_exporting_connector", prometheus_remote_write_tests, NULL, NULL);
+#endif
+
+#if HAVE_KINESIS
+ const struct CMUnitTest kinesis_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_aws_kinesis_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_aws_kinesis_connector_worker, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name("kinesis_exporting_connector", kinesis_tests, NULL, NULL);
+#endif
+
+#if ENABLE_EXPORTING_PUBSUB
+ const struct CMUnitTest pubsub_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_pubsub_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_pubsub_connector_worker, setup_initialized_engine, teardown_initialized_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name("pubsub_exporting_connector", pubsub_tests, NULL, NULL);
+#endif
+
+#if HAVE_MONGOC
+ const struct CMUnitTest mongodb_tests[] = {
+ cmocka_unit_test_setup_teardown(
+ test_init_mongodb_instance, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_format_batch_mongodb, setup_configured_engine, teardown_configured_engine),
+ cmocka_unit_test_setup_teardown(
+ test_mongodb_connector_worker, setup_configured_engine, teardown_configured_engine),
+ };
+
+ test_res += cmocka_run_group_tests_name("mongodb_exporting_connector", mongodb_tests, NULL, NULL);
+#endif
+
+ return test_res;
+}
diff --git a/exporting/tests/test_exporting_engine.h b/exporting/tests/test_exporting_engine.h
new file mode 100644
index 000000000..800be1b99
--- /dev/null
+++ b/exporting/tests/test_exporting_engine.h
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef TEST_EXPORTING_ENGINE_H
+#define TEST_EXPORTING_ENGINE_H 1
+
+#include "libnetdata/libnetdata.h"
+
+#include "exporting/exporting_engine.h"
+#include "exporting/graphite/graphite.h"
+#include "exporting/json/json.h"
+#include "exporting/opentsdb/opentsdb.h"
+
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#include "exporting/prometheus/remote_write/remote_write.h"
+#endif
+
+#if HAVE_KINESIS
+#include "exporting/aws_kinesis/aws_kinesis.h"
+#endif
+
+#if ENABLE_EXPORTING_PUBSUB
+#include "exporting/pubsub/pubsub.h"
+#endif
+
+#if HAVE_MONGOC
+#include "exporting/mongodb/mongodb.h"
+#endif
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <setjmp.h>
+#include <stdint.h>
+#include <cmocka.h>
+
+#define MAX_LOG_LINE 1024
+extern char log_line[];
+
+// -----------------------------------------------------------------------
+// doubles for Netdata functions
+
+const char *__wrap_strdupz(const char *s);
+void __wrap_info_int(const char *file, const char *function, const unsigned long line, const char *fmt, ...);
+int __wrap_connect_to_one_of(
+ const char *destination,
+ int default_port,
+ struct timeval *timeout,
+ size_t *reconnects_counter,
+ char *connected_to,
+ size_t connected_to_size);
+void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
+void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
+void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line);
+time_t __mock_rrddim_query_oldest_time(RRDDIM *rd);
+time_t __mock_rrddim_query_latest_time(RRDDIM *rd);
+void __mock_rrddim_query_init(RRDDIM *rd, struct rrddim_query_handle *handle, time_t start_time, time_t end_time);
+int __mock_rrddim_query_is_finished(struct rrddim_query_handle *handle);
+storage_number __mock_rrddim_query_next_metric(struct rrddim_query_handle *handle, time_t *current_time);
+void __mock_rrddim_query_finalize(struct rrddim_query_handle *handle);
+
+// -----------------------------------------------------------------------
+// wraps for system functions
+
+void __wrap_uv_thread_create(uv_thread_t thread, void (*worker)(void *arg), void *arg);
+void __wrap_uv_mutex_lock(uv_mutex_t *mutex);
+void __wrap_uv_mutex_unlock(uv_mutex_t *mutex);
+void __wrap_uv_cond_signal(uv_cond_t *cond_var);
+void __wrap_uv_cond_wait(uv_cond_t *cond_var, uv_mutex_t *mutex);
+ssize_t __wrap_recv(int sockfd, void *buf, size_t len, int flags);
+ssize_t __wrap_send(int sockfd, const void *buf, size_t len, int flags);
+
+// -----------------------------------------------------------------------
+// doubles and originals for exporting engine functions
+
+struct engine *__real_read_exporting_config();
+struct engine *__wrap_read_exporting_config();
+struct engine *__mock_read_exporting_config();
+
+int __real_init_connectors(struct engine *engine);
+int __wrap_init_connectors(struct engine *engine);
+
+int __real_mark_scheduled_instances(struct engine *engine);
+int __wrap_mark_scheduled_instances(struct engine *engine);
+
+calculated_number __real_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+calculated_number __wrap_exporting_calculate_value_from_stored_data(
+ struct instance *instance,
+ RRDDIM *rd,
+ time_t *last_timestamp);
+
+int __real_prepare_buffers(struct engine *engine);
+int __wrap_prepare_buffers(struct engine *engine);
+
+void __real_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+void __wrap_create_main_rusage_chart(RRDSET **st_rusage, RRDDIM **rd_user, RRDDIM **rd_system);
+
+void __real_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+void __wrap_send_main_rusage(RRDSET *st_rusage, RRDDIM *rd_user, RRDDIM *rd_system);
+
+int __real_send_internal_metrics(struct instance *instance);
+int __wrap_send_internal_metrics(struct instance *instance);
+
+int __real_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
+int __wrap_rrdhost_is_exportable(struct instance *instance, RRDHOST *host);
+
+int __real_rrdset_is_exportable(struct instance *instance, RRDSET *st);
+int __wrap_rrdset_is_exportable(struct instance *instance, RRDSET *st);
+
+int __mock_start_batch_formatting(struct instance *instance);
+int __mock_start_host_formatting(struct instance *instance, RRDHOST *host);
+int __mock_start_chart_formatting(struct instance *instance, RRDSET *st);
+int __mock_metric_formatting(struct instance *instance, RRDDIM *rd);
+int __mock_end_chart_formatting(struct instance *instance, RRDSET *st);
+int __mock_end_host_formatting(struct instance *instance, RRDHOST *host);
+int __mock_end_batch_formatting(struct instance *instance);
+
+int __wrap_simple_connector_end_batch(struct instance *instance);
+
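+// The connector-specific doubles below are compiled only when the
+// corresponding exporting connector is enabled at build time.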
+#if ENABLE_PROMETHEUS_REMOTE_WRITE
+void *__real_init_write_request();
+void *__wrap_init_write_request();
+
+void __real_add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
+void __wrap_add_host_info(
+ void *write_request_p,
+ const char *name, const char *instance, const char *application, const char *version, const int64_t timestamp);
+
+void __real_add_label(void *write_request_p, char *key, char *value);
+void __wrap_add_label(void *write_request_p, char *key, char *value);
+
+void __real_add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp);
+void __wrap_add_metric(
+ void *write_request_p,
+ const char *name, const char *chart, const char *family, const char *dimension,
+ const char *instance, const double value, const int64_t timestamp);
+#endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */
+
+#if HAVE_KINESIS
+void __wrap_aws_sdk_init();
+void __wrap_kinesis_init(
+ void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
+ const long timeout);
+void __wrap_kinesis_put_record(
+ void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
+ size_t data_len);
+int __wrap_kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes);
+#endif /* HAVE_KINESIS */
+
+#if ENABLE_EXPORTING_PUBSUB
+int __wrap_pubsub_init(
+ void *pubsub_specific_data_p, char *error_message, const char *destination, const char *credentials_file,
+ const char *project_id, const char *topic_id);
+int __wrap_pubsub_add_message(void *pubsub_specific_data_p, char *data);
+int __wrap_pubsub_publish(
+ void *pubsub_specific_data_p, char *error_message, size_t buffered_metrics, size_t buffered_bytes);
+int __wrap_pubsub_get_result(
+ void *pubsub_specific_data_p, char *error_message,
+ size_t *sent_metrics, size_t *sent_bytes, size_t *lost_metrics, size_t *lost_bytes);
+#endif /* ENABLE_EXPORTING_PUBSUB */
+
+#if HAVE_MONGOC
+void __wrap_mongoc_init();
+mongoc_uri_t *__wrap_mongoc_uri_new_with_error(const char *uri_string, bson_error_t *error);
+int32_t __wrap_mongoc_uri_get_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t fallback);
+bool __wrap_mongoc_uri_set_option_as_int32(const mongoc_uri_t *uri, const char *option, int32_t value);
+mongoc_client_t *__wrap_mongoc_client_new_from_uri(const mongoc_uri_t *uri);
+bool __wrap_mongoc_client_set_appname(mongoc_client_t *client, const char *appname);
+mongoc_collection_t *
+__wrap_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection);
+mongoc_collection_t *
+__real_mongoc_client_get_collection(mongoc_client_t *client, const char *db, const char *collection);
+void __wrap_mongoc_uri_destroy(mongoc_uri_t *uri);
+bool __wrap_mongoc_collection_insert_many(
+ mongoc_collection_t *collection,
+ const bson_t **documents,
+ size_t n_documents,
+ const bson_t *opts,
+ bson_t *reply,
+ bson_error_t *error);
+#endif /* HAVE_MONGOC */
+
+// -----------------------------------------------------------------------
+// fixtures
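+// Setup/teardown pairs intended for cmocka_unit_test_setup_teardown(); most
+// follow CMocka's int (*)(void **state) signature and typically pass the
+// object under test through *state.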
+
+int setup_configured_engine(void **state);
+int teardown_configured_engine(void **state);
+int setup_rrdhost();
+int teardown_rrdhost();
+int setup_initialized_engine(void **state);
+int teardown_initialized_engine(void **state);
+int setup_prometheus(void **state);
+int teardown_prometheus(void **state);
+
+void init_connectors_in_tests(struct engine *engine);
+
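+// Illustrative sketch (not part of the real test suite): assuming a double's
+// body calls function_called() and returns mock_ptr_type(struct engine *), a
+// test would drive it with the standard CMocka expectation macros, e.g.:
+//
+//     expect_function_call(__wrap_read_exporting_config);
+//     will_return(__wrap_read_exporting_config, engine);
+//     assert_ptr_equal(read_exporting_config(), engine);
+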
+#endif /* TEST_EXPORTING_ENGINE_H */