Diffstat (limited to '')
-rw-r--r--  src/exporting/README.md  327
-rw-r--r--  src/exporting/TIMESCALE.md (renamed from exporting/TIMESCALE.md)  2
-rw-r--r--  src/exporting/WALKTHROUGH.md (renamed from exporting/WALKTHROUGH.md)  6
l---------  src/exporting/aws_kinesis/README.md (renamed from exporting/aws_kinesis/README.md)  0
-rw-r--r--  src/exporting/aws_kinesis/aws_kinesis.c (renamed from exporting/aws_kinesis/aws_kinesis.c)  4
-rw-r--r--  src/exporting/aws_kinesis/aws_kinesis.h (renamed from exporting/aws_kinesis/aws_kinesis.h)  0
-rw-r--r--  src/exporting/aws_kinesis/aws_kinesis_put_record.cc (renamed from exporting/aws_kinesis/aws_kinesis_put_record.cc)  0
-rw-r--r--  src/exporting/aws_kinesis/aws_kinesis_put_record.h (renamed from exporting/aws_kinesis/aws_kinesis_put_record.h)  0
-rw-r--r--  src/exporting/aws_kinesis/integrations/aws_kinesis.md (renamed from exporting/aws_kinesis/integrations/aws_kinesis.md)  12
-rw-r--r--  src/exporting/aws_kinesis/metadata.yaml  173
-rw-r--r--  src/exporting/check_filters.c (renamed from exporting/check_filters.c)  0
-rw-r--r--  src/exporting/clean_connectors.c (renamed from exporting/clean_connectors.c)  0
-rw-r--r--  src/exporting/exporting.conf (renamed from exporting/exporting.conf)  0
-rw-r--r--  src/exporting/exporting_engine.c (renamed from exporting/exporting_engine.c)  19
-rw-r--r--  src/exporting/exporting_engine.h (renamed from exporting/exporting_engine.h)  2
l---------  src/exporting/graphite/README.md (renamed from exporting/graphite/README.md)  0
-rw-r--r--  src/exporting/graphite/graphite.c (renamed from exporting/graphite/graphite.c)  10
-rw-r--r--  src/exporting/graphite/graphite.h (renamed from exporting/graphite/graphite.h)  0
-rw-r--r--  src/exporting/graphite/integrations/blueflood.md (renamed from exporting/graphite/integrations/blueflood.md)  12
-rw-r--r--  src/exporting/graphite/integrations/graphite.md (renamed from exporting/graphite/integrations/graphite.md)  12
-rw-r--r--  src/exporting/graphite/integrations/influxdb.md  172
-rw-r--r--  src/exporting/graphite/integrations/kairosdb.md (renamed from exporting/graphite/integrations/kairosdb.md)  12
-rw-r--r--  src/exporting/graphite/metadata.yaml  212
-rw-r--r--  src/exporting/init_connectors.c (renamed from exporting/init_connectors.c)  16
l---------  src/exporting/json/README.md (renamed from exporting/json/README.md)  0
-rw-r--r--  src/exporting/json/integrations/json.md (renamed from exporting/json/integrations/json.md)  12
-rw-r--r--  src/exporting/json/json.c  313
-rw-r--r--  src/exporting/json/json.h (renamed from exporting/json/json.h)  0
-rw-r--r--  src/exporting/json/metadata.yaml  151
l---------  src/exporting/mongodb/README.md (renamed from exporting/mongodb/README.md)  0
-rw-r--r--  src/exporting/mongodb/integrations/mongodb.md  145
-rw-r--r--  src/exporting/mongodb/metadata.yaml  151
-rw-r--r--  src/exporting/mongodb/mongodb.c (renamed from exporting/mongodb/mongodb.c)  4
-rw-r--r--  src/exporting/mongodb/mongodb.h (renamed from exporting/mongodb/mongodb.h)  0
-rwxr-xr-x  src/exporting/nc-exporting.sh (renamed from exporting/nc-exporting.sh)  0
l---------  src/exporting/opentsdb/README.md (renamed from exporting/opentsdb/README.md)  0
-rw-r--r--  src/exporting/opentsdb/integrations/opentsdb.md (renamed from exporting/opentsdb/integrations/opentsdb.md)  12
-rw-r--r--  src/exporting/opentsdb/metadata.yaml  176
-rw-r--r--  src/exporting/opentsdb/opentsdb.c (renamed from exporting/opentsdb/opentsdb.c)  18
-rw-r--r--  src/exporting/opentsdb/opentsdb.h (renamed from exporting/opentsdb/opentsdb.h)  0
-rw-r--r--  src/exporting/process_data.c (renamed from exporting/process_data.c)  10
-rw-r--r--  src/exporting/prometheus/README.md  361
-rw-r--r--  src/exporting/prometheus/integrations/appoptics.md (renamed from exporting/prometheus/integrations/appoptics.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/azure_data_explorer.md (renamed from exporting/prometheus/integrations/azure_data_explorer.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/azure_event_hub.md (renamed from exporting/prometheus/integrations/azure_event_hub.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/chronix.md (renamed from exporting/prometheus/integrations/chronix.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/cortex.md (renamed from exporting/prometheus/integrations/cortex.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/cratedb.md (renamed from exporting/prometheus/integrations/cratedb.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/elasticsearch.md  158
-rw-r--r--  src/exporting/prometheus/integrations/gnocchi.md (renamed from exporting/prometheus/integrations/gnocchi.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/google_bigquery.md (renamed from exporting/prometheus/integrations/google_bigquery.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/greptimedb.md  158
-rw-r--r--  src/exporting/prometheus/integrations/irondb.md (renamed from exporting/prometheus/integrations/irondb.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/kafka.md  158
-rw-r--r--  src/exporting/prometheus/integrations/m3db.md (renamed from exporting/prometheus/integrations/m3db.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/metricfire.md (renamed from exporting/prometheus/integrations/metricfire.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/new_relic.md  158
-rw-r--r--  src/exporting/prometheus/integrations/opeansearch.md  158
-rw-r--r--  src/exporting/prometheus/integrations/postgresql.md  158
-rw-r--r--  src/exporting/prometheus/integrations/prometheus_remote_write.md (renamed from exporting/prometheus/integrations/prometheus_remote_write.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/quasardb.md (renamed from exporting/prometheus/integrations/quasardb.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/splunk_signalfx.md (renamed from exporting/prometheus/integrations/splunk_signalfx.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/thanos.md (renamed from exporting/prometheus/integrations/thanos.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/tikv.md (renamed from exporting/prometheus/integrations/tikv.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/timescaledb.md (renamed from exporting/prometheus/integrations/timescaledb.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/victoriametrics.md (renamed from exporting/prometheus/integrations/victoriametrics.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/vmware_aria.md (renamed from exporting/prometheus/integrations/vmware_aria.md)  12
-rw-r--r--  src/exporting/prometheus/integrations/wavefront.md (renamed from exporting/prometheus/integrations/wavefront.md)  12
-rw-r--r--  src/exporting/prometheus/metadata.yaml  460
-rw-r--r--  src/exporting/prometheus/prometheus.c  1020
-rw-r--r--  src/exporting/prometheus/prometheus.h (renamed from exporting/prometheus/prometheus.h)  9
l---------  src/exporting/prometheus/remote_write/README.md (renamed from exporting/prometheus/remote_write/README.md)  0
-rw-r--r--  src/exporting/prometheus/remote_write/remote_write.c  403
-rw-r--r--  src/exporting/prometheus/remote_write/remote_write.h (renamed from exporting/prometheus/remote_write/remote_write.h)  0
-rw-r--r--  src/exporting/prometheus/remote_write/remote_write.proto (renamed from exporting/prometheus/remote_write/remote_write.proto)  0
-rw-r--r--  src/exporting/prometheus/remote_write/remote_write_request.cc (renamed from exporting/prometheus/remote_write/remote_write_request.cc)  0
-rw-r--r--  src/exporting/prometheus/remote_write/remote_write_request.h (renamed from exporting/prometheus/remote_write/remote_write_request.h)  0
l---------  src/exporting/pubsub/README.md (renamed from exporting/pubsub/README.md)  0
-rw-r--r--  src/exporting/pubsub/integrations/google_cloud_pub_sub.md (renamed from exporting/pubsub/integrations/google_cloud_pub_sub.md)  12
-rw-r--r--  src/exporting/pubsub/metadata.yaml  152
-rw-r--r--  src/exporting/pubsub/pubsub.c (renamed from exporting/pubsub/pubsub.c)  4
-rw-r--r--  src/exporting/pubsub/pubsub.h (renamed from exporting/pubsub/pubsub.h)  0
-rw-r--r--  src/exporting/pubsub/pubsub_publish.cc (renamed from exporting/pubsub/pubsub_publish.cc)  0
-rw-r--r--  src/exporting/pubsub/pubsub_publish.h (renamed from exporting/pubsub/pubsub_publish.h)  0
-rw-r--r--  src/exporting/read_config.c (renamed from exporting/read_config.c)  0
-rw-r--r--  src/exporting/sample-metadata.yaml (renamed from exporting/sample-metadata.yaml)  0
-rw-r--r--  src/exporting/send_data.c (renamed from exporting/send_data.c)  8
-rw-r--r--  src/exporting/send_internal_metrics.c (renamed from exporting/send_internal_metrics.c)  0
88 files changed, 5382 insertions, 218 deletions
diff --git a/src/exporting/README.md b/src/exporting/README.md
new file mode 100644
index 000000000..83b391f72
--- /dev/null
+++ b/src/exporting/README.md
@@ -0,0 +1,327 @@
+<!--
+title: "Exporting reference"
+description: "With the exporting engine, you can archive your Netdata metrics to multiple external databases for long-term storage or further analysis."
+sidebar_label: "Export"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/README.md"
+learn_status: "Published"
+learn_rel_path: "Integrations/Export"
+learn_doc_purpose: "Explain the exporting engine options and all of the exporting connectors' options"
+-->
+
+# Exporting reference
+
+Welcome to the exporting engine reference guide. This guide contains comprehensive information about enabling,
+configuring, and monitoring Netdata's exporting engine, which allows you to send metrics to external time-series
+databases.
+
+For a quick introduction to the exporting engine's features, read our doc on [exporting metrics to time-series
+databases](/docs/exporting-metrics/README.md), or jump in to [enabling a connector](/docs/exporting-metrics/enable-an-exporting-connector.md).
+
+The exporting engine has a modular structure and supports metric exporting via multiple exporting connector instances at
+the same time. You can have different update intervals and filters configured for every exporting connector instance.
+
+When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
+restart its process_, not the entire [database of long-term metrics](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md).
+
+Since Netdata collects thousands of metrics per server per second, which would easily congest any database server when
+several Netdata servers send data to it, Netdata can send metrics at a lower frequency by resampling them.
+
+So, although Netdata collects metrics every second, it can send averages or sums to the external database servers every
+X seconds (though it can send them per second if you need it to).
+
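+For example, a connector instance configured like this (a hypothetical sketch using two options documented below)
+would send 10-second averages instead of per-second samples:
+
+```conf
+[graphite:my_graphite_instance]
+    # resample to one value every 10 seconds
+    update every = 10
+    # send the average of the collected values
+    data source = average
+```
+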
+## Features
+
+### Integration
+
+The exporting engine uses a number of connectors to send Netdata metrics to external time-series databases. See our
+[list of supported databases](/docs/exporting-metrics/README.md#supported-databases) for information on which
+connector to enable and configure for your database of choice.
+
+- [**AWS Kinesis Data Streams**](/src/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
+ format.
+- [**Google Cloud Pub/Sub Service**](/src/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
+ format.
+- [**Graphite**](/src/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
+ `prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can
+ also be configured). Learn more in our guide to [export and visualize Netdata metrics in
+ Graphite](/src/exporting/graphite/README.md).
+- [**JSON** document databases](/src/exporting/json/README.md)
+- [**OpenTSDB**](/src/exporting/opentsdb/README.md): Uses plaintext or HTTP interfaces. Metrics are sent to
+ OpenTSDB as `prefix.chart.dimension` with tag `host=hostname`.
+- [**MongoDB**](/src/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
+- [**Prometheus**](/src/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
+  from a node using the Netdata API.
+- [**Prometheus remote write**](/src/exporting/prometheus/remote_write/README.md): A binary snappy-compressed protocol
+ buffer encoding over HTTP. Supports many [storage
+ providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
+- [**TimescaleDB**](/src/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
+ Netdata client and writes them to a TimescaleDB table.
+
+### Chart filtering
+
+Netdata can filter metrics to send only a subset of the collected metrics. You can use the
+configuration file
+
+```txt
+[prometheus:exporter]
+ send charts matching = system.*
+```
+
+or the URL parameter `filter` in the `allmetrics` API call.
+
+```txt
+http://localhost:19999/api/v1/allmetrics?format=shell&filter=system.*
+```
+
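+Patterns can also be negated with `!`, and the first matching pattern wins (see `send charts matching` under
+[Options](#options) below). For example, this hypothetical filter exports all `apps.*` charts except those ending
+in `reads`:
+
+```txt
+[prometheus:exporter]
+    send charts matching = !*reads apps.*
+```
+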
+### Operation modes
+
+Netdata supports three modes of operation for all exporting connectors:
+
+- `as-collected` sends the metrics to external databases as they are collected, in the units in which they are
+  collected. So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For
+  example, to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
+
+- `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
+ are sent as gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but
+ you will not be able to copy and paste queries from other sources to convert units. For example, CPU utilization
+ percentage is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage
+ to the external database.
+
+- `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
+ database. So, if Netdata is configured to send data to the database every 10 seconds, the sum of the 10 values
+ shown on the Netdata charts will be used.
+
+Time-series databases generally recommend collecting the raw values (`as-collected`). If you plan to invest in building
+your monitoring around a time-series database and you already know (or will invest in learning) how to convert units
+and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`.
+
+If, on the other hand, you just need long-term archiving of Netdata metrics and you plan to mainly work with
+Netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot
+simpler. Furthermore, if you use `average`, the charts shown in the external service will match exactly what you
+see in Netdata, which is not necessarily true for the other modes of operation.
+
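+The operation mode can also be selected per query when scraping the `allmetrics` endpoint, via the `source` URL
+parameter. A minimal example, assuming a local Agent listening on the default port:
+
+```sh
+# request the metrics exactly as collected, rather than normalized averages
+curl 'http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected'
+```
+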
+### Independent operation
+
+The exporting engine is designed not to slow down Netdata, regardless of the speed of the external database server.
+
+> ❗ You should keep in mind though that many exporting connector instances can consume a lot of CPU resources if they
+> run their batches at the same time. You can set different update intervals for every exporting connector instance,
+> but even in that case they can occasionally synchronize their batches for a moment.
+
+## Configuration
+
+Here are the configuration blocks for every supported connector. Your current `exporting.conf` file may look a little
+different.
+
+You can configure each connector individually using the available [options](#options). The
+`[graphite:my_graphite_instance]` block contains examples of some of these additional options in action.
+
+```conf
+[exporting:global]
+ enabled = yes
+ send configured labels = no
+ send automatic labels = no
+ update every = 10
+
+[prometheus:exporter]
+ send names instead of ids = yes
+ send configured labels = yes
+ send automatic labels = no
+ send charts matching = *
+ send hosts matching = localhost *
+ prefix = netdata
+
+[graphite:my_graphite_instance]
+ enabled = yes
+ destination = localhost:2003
+ data source = average
+ prefix = Netdata
+ hostname = my-name
+ update every = 10
+ buffer on failures = 10
+ timeout ms = 20000
+ send charts matching = *
+ send hosts matching = localhost *
+ send names instead of ids = yes
+ send configured labels = yes
+ send automatic labels = yes
+
+[prometheus_remote_write:my_prometheus_remote_write_instance]
+ enabled = yes
+ destination = localhost
+ remote write URL path = /receive
+
+[kinesis:my_kinesis_instance]
+ enabled = yes
+ destination = us-east-1
+ stream name = netdata
+ aws_access_key_id = my_access_key_id
+ aws_secret_access_key = my_aws_secret_access_key
+
+[pubsub:my_pubsub_instance]
+ enabled = yes
+ destination = pubsub.googleapis.com
+ credentials file = /etc/netdata/pubsub_credentials.json
+ project id = my_project
+ topic id = my_topic
+
+[mongodb:my_mongodb_instance]
+ enabled = yes
+ destination = localhost
+ database = my_database
+ collection = my_collection
+
+[json:my_json_instance]
+ enabled = yes
+ destination = localhost:5448
+
+[opentsdb:my_opentsdb_plaintext_instance]
+ enabled = yes
+ destination = localhost:4242
+
+[opentsdb:http:my_opentsdb_http_instance]
+ enabled = yes
+ destination = localhost:4242
+ username = my_username
+ password = my_password
+
+[opentsdb:https:my_opentsdb_https_instance]
+ enabled = yes
+ destination = localhost:8082
+```
+
+### Sections
+
+- `[exporting:global]` is a section where you can set your defaults for all exporting connectors.
+- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
+ `http://NODE:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`).
+- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
+ - `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
+ prometheus_remote_write | json | kinesis | pubsub | mongodb. For graphite, opentsdb,
+ json, and prometheus_remote_write connectors you can also use `:http` or `:https` modifiers
+ (e.g.: `opentsdb:https`).
+  - `name` can be any arbitrary instance name you choose.
+
+### Options
+
+Configure individual connectors and override any global settings with the following options.
+
+- `enabled = yes | no`, enables or disables an exporting connector instance
+
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
+ ports to connect to. Netdata will use the **first available** to send the metrics.
+
+  The format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.
+
+  `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only protocol supported by the current exporting engine.
+
+  `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you need to enclose the IP in `[]` to
+  separate it from the port.
+
+  `PORT` can be a number or a service name. If omitted, the default port for the exporting connector will be used
+  (graphite = 2003, opentsdb = 4242).
+
+ Example IPv4:
+
+```conf
+ destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+```
+
+ Example IPv6 and IPv4 together:
+
+```conf
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+ Netdata also ships `nc-exporting.sh`, a script that can be used as a fallback exporting connector to save the
+ metrics to disk and push them to the time-series database when it becomes available again. It can also be used to
+ monitor / trace / debug the metrics Netdata generates.
+
+  For the Kinesis exporting connector, `destination` should be set to an AWS region (for example, `us-east-1`).
+
+  For the MongoDB exporting connector, `destination` should be set to a
+  [MongoDB URI](https://docs.mongodb.com/manual/reference/connection-string/).
+
+  For the Pub/Sub exporting connector, `destination` can be set to a specific service endpoint.
+
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
+ be sent to the external database.
+
+- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default this
+ is `[global].hostname`.
+
+- `prefix = Netdata`, is the prefix to add to all metrics.
+
+- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
+ randomness to this number, to prevent stressing the external server when many Netdata servers send data to the same
+ database. This randomness does not affect the quality of the data, only the time they are sent.
+
+- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data,
+ when the external database server is not available. If the server fails to receive the data after that many
+ failures, data loss on the connector instance is expected (Netdata will also log it).
+
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
+ By default this is `2 * update_every * 1000`.
+
+- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
+ of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
+ `localhost`), allowing us to filter which hosts will be sent to the external database when this Netdata is a central
+ Netdata aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named
+ `*db*` except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first
+ pattern matching the hostname will be used - positive or negative).
+
+- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
+ within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
+ gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
+ apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
+ positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
+ parameter has a higher priority than the configuration option.
+
+- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system
+ and names are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several
+ cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+- `send configured labels = yes | no` controls if host labels defined in the `[host labels]` section in `netdata.conf`
+  should be sent to the external database.
+
+- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`,
+  should be sent to the external database.
+
+## HTTPS
+
+Netdata can send metrics to external databases using the TLS/SSL protocol. Unfortunately, some of
+them do not support encrypted connections, so you will have to configure a reverse proxy to enable
+HTTPS communication between Netdata and an external database. You can set up a reverse proxy with
+[Nginx](/docs/netdata-agent/configuration/running-the-netdata-agent-behind-a-reverse-proxy/Running-behind-nginx.md).
+
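+As a minimal sketch (the listener port, certificate paths, and upstream address are assumptions to adapt), such a
+proxy terminates TLS from Netdata and forwards plaintext HTTP to the database:
+
+```conf
+# hypothetical nginx server block in front of an OpenTSDB HTTP listener
+server {
+    listen 8082 ssl;
+    ssl_certificate     /etc/ssl/certs/exporting.crt;    # assumed certificate path
+    ssl_certificate_key /etc/ssl/private/exporting.key;  # assumed key path
+
+    location / {
+        proxy_pass http://127.0.0.1:4242;  # assumed plaintext OpenTSDB endpoint
+    }
+}
+```
+
+Netdata would then point an `https` connector instance at the proxy, as in the
+`[opentsdb:https:my_opentsdb_https_instance]` example above with `destination = localhost:8082`.
+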
+## Exporting engine monitoring
+
+Netdata creates four charts in the dashboard, under the **Netdata Monitoring** section, to help you monitor the health
+and performance of the exporting engine itself:
+
+1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
+ external database server.
+
+2. **Exporting data size**, the amount of data (in KB) Netdata added to the buffer.
+
+3. **Exporting operations**, the number of operations performed by Netdata.
+
+4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread that is responsible for sending
+ the metrics to the external database server.
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
+
+## Exporting engine alerts
+
+Netdata adds three alerts:
+
+1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
+2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
+3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server
+
+![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
+
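+You can inspect these alerts like any other Netdata alert, for example through the Agent's alarms API (a local Agent
+on the default port is assumed):
+
+```sh
+# list the currently raised alerts, including the exporting_* ones when they trigger
+curl 'http://localhost:19999/api/v1/alarms'
+```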
+
diff --git a/exporting/TIMESCALE.md b/src/exporting/TIMESCALE.md
index 8ca61b75e..3bad28379 100644
--- a/exporting/TIMESCALE.md
+++ b/src/exporting/TIMESCALE.md
@@ -1,7 +1,7 @@
<!--
title: "Writing metrics to TimescaleDB"
description: "Send Netdata metrics to TimescaleDB for long-term archiving and further analysis."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/TIMESCALE.md"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/TIMESCALE.md"
sidebar_label: "Writing metrics to TimescaleDB"
learn_status: "Published"
learn_rel_path: "Integrations/Export"
diff --git a/exporting/WALKTHROUGH.md b/src/exporting/WALKTHROUGH.md
index 86be758e4..450789d9d 100644
--- a/exporting/WALKTHROUGH.md
+++ b/src/exporting/WALKTHROUGH.md
@@ -57,13 +57,13 @@ command to run (`/bin/bash`) and then chooses the base container images (`centos
be sitting inside the shell of the container.
After we have entered the shell we can install Netdata. This process could not be easier. If you take a look at [this
-link](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
+link](/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
any issues with these one liners and their bootstrapping scripts so far (If you guys run into anything do share). Run
the following command in your container.
<!-- candidate for reuse -->
```sh
-wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh --dont-wait
+wget -O /tmp/netdata-kickstart.sh https://get.netdata.cloud/kickstart.sh && sh /tmp/netdata-kickstart.sh --dont-wait
```
After the install completes you should be able to hit the Netdata dashboard at <http://localhost:19999/> (replace
@@ -217,7 +217,7 @@ the `chart` dimension. If you'd like you can combine the `chart` and `instance`
Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
This is the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
-page](https://github.com/netdata/netdata/blob/master/exporting/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
+page](/src/exporting/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter like so.
<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected> If you choose to use
this method you will need to use Prometheus's set of functions here: <https://prometheus.io/docs/querying/functions/> to
diff --git a/exporting/aws_kinesis/README.md b/src/exporting/aws_kinesis/README.md
index dbc98ac13..dbc98ac13 120000
--- a/exporting/aws_kinesis/README.md
+++ b/src/exporting/aws_kinesis/README.md
diff --git a/exporting/aws_kinesis/aws_kinesis.c b/src/exporting/aws_kinesis/aws_kinesis.c
index 498d9ee23..27f2fb0ba 100644
--- a/exporting/aws_kinesis/aws_kinesis.c
+++ b/src/exporting/aws_kinesis/aws_kinesis.c
@@ -100,6 +100,10 @@ void aws_kinesis_connector_worker(void *instance_p)
struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
struct aws_kinesis_specific_data *connector_specific_data = instance->connector_specific_data;
+ char threadname[ND_THREAD_TAG_MAX + 1];
+ snprintfz(threadname, ND_THREAD_TAG_MAX, "EXPKNSS[%zu]", instance->index);
+ uv_thread_set_name_np(threadname);
+
while (!instance->engine->exit) {
unsigned long long partition_key_seq = 0;
struct stats *stats = &instance->stats;
diff --git a/exporting/aws_kinesis/aws_kinesis.h b/src/exporting/aws_kinesis/aws_kinesis.h
index d88a45861..d88a45861 100644
--- a/exporting/aws_kinesis/aws_kinesis.h
+++ b/src/exporting/aws_kinesis/aws_kinesis.h
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.cc b/src/exporting/aws_kinesis/aws_kinesis_put_record.cc
index 62c6b0301..62c6b0301 100644
--- a/exporting/aws_kinesis/aws_kinesis_put_record.cc
+++ b/src/exporting/aws_kinesis/aws_kinesis_put_record.cc
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.h b/src/exporting/aws_kinesis/aws_kinesis_put_record.h
index 321baf669..321baf669 100644
--- a/exporting/aws_kinesis/aws_kinesis_put_record.h
+++ b/src/exporting/aws_kinesis/aws_kinesis_put_record.h
diff --git a/exporting/aws_kinesis/integrations/aws_kinesis.md b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
index deff55be7..633729c74 100644
--- a/exporting/aws_kinesis/integrations/aws_kinesis.md
+++ b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/aws_kinesis/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/aws_kinesis/metadata.yaml"
sidebar_label: "AWS Kinesis"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -50,7 +50,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -62,7 +62,7 @@ Netdata automatically computes a partition key for every record with the purpose
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -76,7 +76,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/aws_kinesis/metadata.yaml b/src/exporting/aws_kinesis/metadata.yaml
new file mode 100644
index 000000000..806b5cbac
--- /dev/null
+++ b/src/exporting/aws_kinesis/metadata.yaml
@@ -0,0 +1,173 @@
+# yamllint disable rule:line-length
+---
+id: 'export-aws-kinesis'
+meta:
+ name: 'AWS Kinesis'
+ link: 'https://aws.amazon.com/kinesis/'
+ categories:
+ - export
+ icon_filename: 'aws-kinesis.svg'
+keywords:
+ - exporter
+ - AWS
+ - Kinesis
+overview:
+ exporter_description: |
+ Export metrics to AWS Kinesis Data Streams
+ exporter_limitations: ''
+setup:
+ prerequisites:
+ list:
+ - title: ''
+ description: |
+ - First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++
+ - Here are the instructions when building from source, to ensure 3rd party dependencies are installed:
+ ```bash
+ git clone --recursive https://github.com/aws/aws-sdk-cpp.git
+ cd aws-sdk-cpp/
+ git submodule update --init --recursive
+ mkdir BUILT
+ cd BUILT
+ cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..
+ make
+ make install
+ ```
+ - `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.
+      - Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.
+ configuration:
+ file:
+ name: 'exporting.conf'
+ options:
+ description: |
+        Netdata automatically computes a partition key for every record with the purpose of distributing records evenly across available shards.
+ The following options can be defined for this exporter.
+ folding:
+ title: 'Config options'
+ enabled: true
+ list:
+ - name: 'enabled'
+ default_value: 'no'
+ description: 'Enables or disables an exporting connector instance (yes|no).'
+ required: true
+ - name: 'destination'
+ default_value: 'no'
+ description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+ required: true
+ detailed_description: |
+        The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+        - PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.
+        - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+        - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+ Example IPv4:
+ ```yaml
+ destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+ ```
+ Example IPv6 and IPv4 together:
+ ```yaml
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+ ```
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+ - name: 'username'
+ default_value: 'my_username'
+ description: 'Username for HTTP authentication'
+ required: false
+ - name: 'password'
+ default_value: 'my_password'
+ description: 'Password for HTTP authentication'
+ required: false
+ - name: 'data source'
+ default_value: ''
+ description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
+ required: false
+ - name: 'hostname'
+ default_value: '[global].hostname'
+ description: 'The hostname to be used for sending data to the external database server.'
+ required: false
+ - name: 'prefix'
+ default_value: 'Netdata'
+ description: 'The prefix to add to all metrics.'
+ required: false
+ - name: 'update every'
+ default_value: '10'
+ description: |
+        Frequency of sending data to the external database, in seconds.
+ required: false
+ detailed_description: |
+ Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+ send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+ - name: 'buffer on failures'
+ default_value: '10'
+ description: |
+ The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
+ required: false
+ detailed_description: |
+ If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+ - name: 'timeout ms'
+ default_value: '2 * update_every * 1000'
+ description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+ required: false
+ - name: 'send hosts matching'
+ default_value: 'localhost *'
+ description: |
+ Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+ required: false
+ detailed_description: |
+ Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+ The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+ filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+ A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+ use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+ - name: 'send charts matching'
+ default_value: '*'
+ description: |
+ One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
+ required: false
+ detailed_description: |
+ A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+ use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+ positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+ has a higher priority than the configuration option.
+ - name: 'send names instead of ids'
+ default_value: ''
+ description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+ required: false
+ detailed_description: |
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+ are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+        different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+ - name: 'send configured labels'
+ default_value: ''
+ description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+ required: false
+ - name: 'send automatic labels'
+ default_value: ''
+ description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list:
+ - name: 'Example configuration'
+ folding:
+ enabled: false
+ description: 'Basic configuration'
+ config: |
+ [kinesis:my_instance]
+ enabled = yes
+ destination = us-east-1
+ - name: 'Configuration with AWS credentials'
+ folding:
+ enabled: false
+      description: 'Basic configuration with AWS credentials and a destination stream name.'
+ config: |
+ [kinesis:my_instance]
+ enabled = yes
+ destination = us-east-1
+ # AWS credentials
+ aws_access_key_id = your_access_key_id
+ aws_secret_access_key = your_secret_access_key
+ # destination stream
+ stream name = your_stream_name
diff --git a/exporting/check_filters.c b/src/exporting/check_filters.c
index 19eecc9bc..19eecc9bc 100644
--- a/exporting/check_filters.c
+++ b/src/exporting/check_filters.c
diff --git a/exporting/clean_connectors.c b/src/exporting/clean_connectors.c
index c850c5ffa..c850c5ffa 100644
--- a/exporting/clean_connectors.c
+++ b/src/exporting/clean_connectors.c
diff --git a/exporting/exporting.conf b/src/exporting/exporting.conf
index c43b2af9e..c43b2af9e 100644
--- a/exporting/exporting.conf
+++ b/src/exporting/exporting.conf
diff --git a/exporting/exporting_engine.c b/src/exporting/exporting_engine.c
index f42a36e92..eb5f8a0a8 100644
--- a/exporting/exporting_engine.c
+++ b/src/exporting/exporting_engine.c
@@ -51,7 +51,7 @@ void analytics_exporting_connectors(BUFFER *b)
buffer_strcat(b, "OpenTSDBHTTP");
break;
case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
buffer_strcat(b, "PrometheusRemoteWrite");
#endif
break;
@@ -66,7 +66,7 @@ void analytics_exporting_connectors(BUFFER *b)
#endif
break;
case EXPORTING_CONNECTOR_TYPE_MONGODB:
-#if HAVE_MONGOC
+#ifdef HAVE_MONGOC
buffer_strcat(b, "MongoDB");
#endif
break;
@@ -95,7 +95,7 @@ static void exporting_clean_engine()
aws_sdk_shutdown();
#endif
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
if (engine->protocol_buffers_initialized)
protocol_buffers_shutdown();
#endif
@@ -119,9 +119,11 @@ static void exporting_clean_engine()
*
* @param ptr thread data.
*/
-static void exporting_main_cleanup(void *ptr)
+static void exporting_main_cleanup(void *pptr)
{
- struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ struct netdata_static_thread *static_thread = CLEANUP_FUNCTION_GET_PTR(pptr);
+ if(!static_thread) return;
+
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
netdata_log_info("cleaning up...");
@@ -174,7 +176,7 @@ static void exporting_main_cleanup(void *ptr)
*/
void *exporting_main(void *ptr)
{
- netdata_thread_cleanup_push(exporting_main_cleanup, ptr);
+ CLEANUP_FUNCTION_REGISTER(exporting_main_cleanup) cleanup_ptr = ptr;
engine = read_exporting_config();
if (!engine) {
@@ -184,7 +186,9 @@ void *exporting_main(void *ptr)
if (init_connectors(engine) != 0) {
netdata_log_error("EXPORTING: cannot initialize exporting connectors");
- send_statistics("EXPORTING_START", "FAIL", "-");
+
+ analytics_statistic_t statistic = { "EXPORTING_START", "FAIL", "-" };
+ analytics_statistic_send(&statistic);
goto cleanup;
}
@@ -212,6 +216,5 @@ void *exporting_main(void *ptr)
}
cleanup:
- netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/exporting/exporting_engine.h b/src/exporting/exporting_engine.h
index fb09b771a..beaa0ba87 100644
--- a/exporting/exporting_engine.h
+++ b/src/exporting/exporting_engine.h
@@ -311,7 +311,7 @@ static inline void disable_instance(struct instance *instance)
#include "exporting/prometheus/prometheus.h"
#include "exporting/opentsdb/opentsdb.h"
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
#include "exporting/prometheus/remote_write/remote_write.h"
#endif
diff --git a/exporting/graphite/README.md b/src/exporting/graphite/README.md
index 15f360d17..15f360d17 120000
--- a/exporting/graphite/README.md
+++ b/src/exporting/graphite/README.md
diff --git a/exporting/graphite/graphite.c b/src/exporting/graphite/graphite.c
index 254db982e..1fc1f2b04 100644
--- a/exporting/graphite/graphite.c
+++ b/src/exporting/graphite/graphite.c
@@ -74,7 +74,7 @@ int init_graphite_instance(struct instance *instance)
void sanitize_graphite_label_value(char *dst, const char *src, size_t len)
{
while (*src != '\0' && len) {
- if (isspace(*src) || *src == ';' || *src == '~')
+ if (isspace((uint8_t)*src) || *src == ';' || *src == '~')
*dst++ = '_';
else
*dst++ = *src;
@@ -133,13 +133,11 @@ int format_dimension_collected_graphite_plaintext(struct instance *instance, RRD
buffer_sprintf(
instance->buffer,
- "%s.%s.%s.%s%s%s%s " COLLECTED_NUMBER_FORMAT " %llu\n",
+ "%s.%s.%s.%s%s " COLLECTED_NUMBER_FORMAT " %llu\n",
instance->config.prefix,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
chart_name,
dimension_name,
- (host->tags) ? ";" : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "",
rd->collector.last_collected_value,
(unsigned long long)rd->collector.last_collected_time.tv_sec);
@@ -179,13 +177,11 @@ int format_dimension_stored_graphite_plaintext(struct instance *instance, RRDDIM
buffer_sprintf(
instance->buffer,
- "%s.%s.%s.%s%s%s%s " NETDATA_DOUBLE_FORMAT " %llu\n",
+ "%s.%s.%s.%s%s " NETDATA_DOUBLE_FORMAT " %llu\n",
instance->config.prefix,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
chart_name,
dimension_name,
- (host->tags) ? ";" : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "",
value,
(unsigned long long)last_t);
diff --git a/exporting/graphite/graphite.h b/src/exporting/graphite/graphite.h
index 79f87e46e..79f87e46e 100644
--- a/exporting/graphite/graphite.h
+++ b/src/exporting/graphite/graphite.h
diff --git a/exporting/graphite/integrations/blueflood.md b/src/exporting/graphite/integrations/blueflood.md
index a4c3c9793..56220fb6a 100644
--- a/exporting/graphite/integrations/blueflood.md
+++ b/src/exporting/graphite/integrations/blueflood.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/graphite/integrations/blueflood.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/graphite/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/integrations/blueflood.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/metadata.yaml"
sidebar_label: "Blueflood"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -48,7 +48,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/graphite/integrations/graphite.md b/src/exporting/graphite/integrations/graphite.md
index fec988027..c38b1aac4 100644
--- a/exporting/graphite/integrations/graphite.md
+++ b/src/exporting/graphite/integrations/graphite.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/graphite/integrations/graphite.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/graphite/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/integrations/graphite.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/metadata.yaml"
sidebar_label: "Graphite"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -48,7 +48,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/graphite/integrations/influxdb.md b/src/exporting/graphite/integrations/influxdb.md
new file mode 100644
index 000000000..4d49febe0
--- /dev/null
+++ b/src/exporting/graphite/integrations/influxdb.md
@@ -0,0 +1,172 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/integrations/influxdb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/metadata.yaml"
+sidebar_label: "InfluxDB"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# InfluxDB
+
+
+<img src="https://netdata.cloud/img/influxdb.svg" width="150"/>
+
+
+Use the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,
+further analysis, or correlation with data from other sources.
+
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Setup
+
+### Prerequisites
+
+####
+
+- You have already installed Netdata and Graphite.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+ ```yaml
+ destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+ ```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
+
+##### buffer on failures
+
+If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
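+
+As a minimal sketch of where this option lives (the connector name and patterns are only illustrative), the filter goes inside the connector's section of `exporting.conf`:
+
+```yaml
+[graphite:netdata]
+    enabled = yes
+    destination = localhost:2003
+    # keep hosts whose names contain "db", excluding any containing "child"
+    send hosts matching = !*child* *db*
+```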
+
+
+##### send charts matching
+
+A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+has a higher priority than the configuration option.
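+
+For example, to preview which metrics a pattern would select without touching the configuration, pass the pattern as the filter URL parameter when querying allmetrics (a sketch, assuming a local agent listening on the default port 19999):
+
+```bash
+curl "http://localhost:19999/api/v1/allmetrics?format=shell&filter=system.*"
+```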
+
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic configuration
+
+```yaml
+[graphite:netdata]
+ enabled = yes
+ destination = localhost:2003
+
+```
+##### Configuration with HTTPS and HTTP authentication
+
+Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:my_instance`.
+
+```yaml
+[graphite:netdata]
+ enabled = yes
+ destination = localhost:2003
+ username = my_username
+ password = my_password
+
+```
+##### Detailed Configuration for a remote, secure host
+
+Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:my_instance`.
+
+```yaml
+[graphite:https:netdata]
+ enabled = yes
+ username = my_username
+ password = my_password
+ destination = 10.10.1.114:2003
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+
+```
+
diff --git a/exporting/graphite/integrations/kairosdb.md b/src/exporting/graphite/integrations/kairosdb.md
index e35d2497c..d5dad7f42 100644
--- a/exporting/graphite/integrations/kairosdb.md
+++ b/src/exporting/graphite/integrations/kairosdb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/graphite/integrations/kairosdb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/graphite/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/integrations/kairosdb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/graphite/metadata.yaml"
sidebar_label: "KairosDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -48,7 +48,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/graphite/metadata.yaml b/src/exporting/graphite/metadata.yaml
new file mode 100644
index 000000000..d7979354a
--- /dev/null
+++ b/src/exporting/graphite/metadata.yaml
@@ -0,0 +1,212 @@
+# yamllint disable rule:line-length
+---
+- &graphexport
+ id: 'export-graphite'
+ meta: &meta
+ name: 'Graphite'
+ link: 'https://graphite.readthedocs.io/en/latest/'
+ categories:
+ - export
+ icon_filename: 'graphite.png'
+ keywords:
+ - exporter
+ - graphite
+ - remote write
+ - time series
+ overview:
+ exporter_description: |
+ Use the Graphite connector for the exporting engine to archive your Netdata metrics to Graphite providers for long-term storage,
+ further analysis, or correlation with data from other sources.
+ exporter_limitations: ''
+ setup:
+ prerequisites:
+ list:
+ - title: ''
+ description: |
+ - You have already installed Netdata and Graphite.
+ configuration:
+ file:
+ name: 'exporting.conf'
+ options:
+ description: 'The following options can be defined for this exporter.'
+ folding:
+ title: 'Config options'
+ enabled: true
+ list:
+ - name: 'enabled'
+ default_value: 'no'
+ description: 'Enables or disables an exporting connector instance (yes|no).'
+ required: true
+ - name: 'destination'
+ default_value: 'no'
+ description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+ required: true
+ detailed_description: |
+              The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+              - PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+              - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+              - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+ Example IPv4:
+ ```yaml
+ destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+ ```
+ Example IPv6 and IPv4 together:
+ ```yaml
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+ ```
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+ - name: 'username'
+ default_value: 'my_username'
+ description: 'Username for HTTP authentication'
+ required: false
+ - name: 'password'
+ default_value: 'my_password'
+ description: 'Password for HTTP authentication'
+ required: false
+ - name: 'data source'
+ default_value: ''
+ description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
+ required: false
+ - name: 'hostname'
+ default_value: '[global].hostname'
+ description: 'The hostname to be used for sending data to the external database server.'
+ required: false
+ - name: 'prefix'
+ default_value: 'netdata'
+ description: 'The prefix to add to all metrics.'
+ required: false
+ - name: 'update every'
+ default_value: '10'
+ description: |
+              Frequency of sending data to the external database, in seconds.
+ required: false
+ detailed_description: |
+ Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+ send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+ - name: 'buffer on failures'
+ default_value: '10'
+ description: |
+ The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
+ required: false
+ detailed_description: |
+ If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+ - name: 'timeout ms'
+ default_value: '20000'
+ description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+ required: false
+ - name: 'send hosts matching'
+ default_value: 'localhost *'
+ description: |
+ Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+ required: false
+ detailed_description: |
+ Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+ The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+ filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+ A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+ use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+ - name: 'send charts matching'
+ default_value: '*'
+ description: |
+ One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
+ required: false
+ detailed_description: |
+ A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+ use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+ positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+ has a higher priority than the configuration option.
+ - name: 'send names instead of ids'
+ default_value: ''
+ description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+ required: false
+ detailed_description: |
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+ are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+              different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+ - name: 'send configured labels'
+ default_value: ''
+ description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+ required: false
+ - name: 'send automatic labels'
+ default_value: ''
+ description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list:
+ - name: 'Example configuration'
+ folding:
+ enabled: false
+ description: 'Basic configuration'
+ config: |
+ [graphite:netdata]
+ enabled = yes
+ destination = localhost:2003
+ - name: 'Configuration with HTTPS and HTTP authentication'
+ folding:
+ enabled: false
+            description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:my_instance`.'
+ config: |
+ [graphite:netdata]
+ enabled = yes
+ destination = localhost:2003
+ username = my_username
+ password = my_password
+ - name: 'Detailed Configuration for a remote, secure host'
+ folding:
+ enabled: false
+            description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `graphite:https:my_instance`.'
+ config: |
+ [graphite:https:netdata]
+ enabled = yes
+ username = my_username
+ password = my_password
+ destination = 10.10.1.114:2003
+ # data source = average
+ # prefix = netdata
+ # hostname = my_hostname
+ # update every = 10
+ # buffer on failures = 10
+ # timeout ms = 20000
+ # send names instead of ids = yes
+ # send charts matching = *
+ # send hosts matching = localhost *
+- <<: *graphexport
+ id: 'export-blueflood'
+ meta:
+ <<: *meta
+ name: Blueflood
+ link: http://blueflood.io/
+ icon_filename: 'blueflood.png'
+ keywords:
+ - export
+ - Blueflood
+ - graphite
+- <<: *graphexport
+ id: 'export-influxdb'
+ meta:
+ <<: *meta
+ name: InfluxDB
+ link: https://www.influxdata.com/
+ icon_filename: 'influxdb.svg'
+ keywords:
+ - InfluxDB
+ - Influx
+ - export
+ - graphite
+- <<: *graphexport
+ id: 'export-kairosdb'
+ meta:
+ <<: *meta
+ name: KairosDB
+ link: https://kairosdb.github.io/
+ icon_filename: 'kairos.png'
+ keywords:
+ - KairosDB
+ - kairos
+ - export
+ - graphite
diff --git a/exporting/init_connectors.c b/src/exporting/init_connectors.c
index 5167a68c2..3481ded4f 100644
--- a/exporting/init_connectors.c
+++ b/src/exporting/init_connectors.c
@@ -5,7 +5,7 @@
#include "json/json.h"
#include "opentsdb/opentsdb.h"
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
#include "prometheus/remote_write/remote_write.h"
#endif
@@ -13,11 +13,11 @@
#include "aws_kinesis/aws_kinesis.h"
#endif
-#if ENABLE_EXPORTING_PUBSUB
+#ifdef ENABLE_EXPORTING_PUBSUB
#include "pubsub/pubsub.h"
#endif
-#if HAVE_MONGOC
+#ifdef HAVE_MONGOC
#include "mongodb/mongodb.h"
#endif
@@ -61,7 +61,7 @@ int init_connectors(struct engine *engine)
return 1;
break;
case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
if (init_prometheus_remote_write_instance(instance) != 0)
return 1;
#endif
@@ -79,7 +79,7 @@ int init_connectors(struct engine *engine)
#endif
break;
case EXPORTING_CONNECTOR_TYPE_MONGODB:
-#if HAVE_MONGOC
+#ifdef HAVE_MONGOC
if (init_mongodb_instance(instance) != 0)
return 1;
#endif
@@ -95,11 +95,9 @@ int init_connectors(struct engine *engine)
netdata_log_error("EXPORTING: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error));
return 1;
}
- char threadname[NETDATA_THREAD_NAME_MAX + 1];
- snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "EXPORTING-%zu", instance->index);
- uv_thread_set_name_np(instance->thread, threadname);
- send_statistics("EXPORTING_START", "OK", instance->config.type_name);
+ analytics_statistic_t statistic = { "EXPORTING_START", "OK", instance->config.type_name };
+ analytics_statistic_send(&statistic);
}
return 0;
diff --git a/exporting/json/README.md b/src/exporting/json/README.md
index 0a8793ca8..0a8793ca8 120000
--- a/exporting/json/README.md
+++ b/src/exporting/json/README.md
diff --git a/exporting/json/integrations/json.md b/src/exporting/json/integrations/json.md
index ab4699d99..0b17aa318 100644
--- a/exporting/json/integrations/json.md
+++ b/src/exporting/json/integrations/json.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/json/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/json/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/json/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/json/metadata.yaml"
sidebar_label: "JSON"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -37,7 +37,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -48,7 +48,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/json/json.c b/src/exporting/json/json.c
new file mode 100644
index 000000000..e9c4db635
--- /dev/null
+++ b/src/exporting/json/json.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "json.h"
+
+/**
+ * Initialize JSON connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_json_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 5448;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->variables_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = simple_connector_end_batch;
+
+ instance->prepare_header = NULL;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
+ if (!instance->buffer) {
+ netdata_log_error("EXPORTING: cannot create buffer for json exporting connector instance %s", instance->config.name);
+ return 1;
+ }
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Initialize JSON connector instance for HTTP protocol
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_json_http_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ struct simple_connector_config *connector_specific_config = callocz(1, sizeof(struct simple_connector_config));
+ instance->config.connector_specific_config = (void *)connector_specific_config;
+ connector_specific_config->default_port = 5448;
+
+ struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = connector_specific_data;
+
+#ifdef ENABLE_HTTPS
+ connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
+ }
+#endif
+
+ instance->start_batch_formatting = open_batch_json_http;
+ instance->start_host_formatting = format_host_labels_json_plaintext;
+ instance->start_chart_formatting = NULL;
+
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
+ instance->metric_formatting = format_dimension_collected_json_plaintext;
+ else
+ instance->metric_formatting = format_dimension_stored_json_plaintext;
+
+ instance->end_chart_formatting = NULL;
+ instance->variables_formatting = NULL;
+ instance->end_host_formatting = flush_host_labels;
+ instance->end_batch_formatting = close_batch_json_http;
+
+ instance->prepare_header = json_http_prepare_header;
+
+ instance->check_response = exporting_discard_response;
+
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
+
+ simple_connector_init(instance);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * Format host labels for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+
+int format_host_labels_json_plaintext(struct instance *instance, RRDHOST *host)
+{
+ if (!instance->labels_buffer)
+ instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
+
+ if (unlikely(!sending_labels_configured(instance)))
+ return 0;
+
+ buffer_strcat(instance->labels_buffer, "\"labels\":{");
+ rrdlabels_to_buffer(host->rrdlabels, instance->labels_buffer, "", ":", "\"", ",",
+ exporting_labels_filter_callback, instance,
+ NULL, sanitize_json_string);
+ buffer_strcat(instance->labels_buffer, "},");
+
+ return 0;
+}
+
+/**
+ * Format dimension using collected data for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_collected_json_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
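+    // In HTTP batch mode, open_batch_json_http() has already written the opening
+    // "[\n" (2 bytes); a longer buffer means a record is already present, so a
+    // separator is prepended before this one.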
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+ }
+
+ buffer_sprintf(
+ instance->buffer,
+
+ "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+ "\"chart_context\":\"%s\","
+ "\"chart_type\":\"%s\","
+ "\"units\":\"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" COLLECTED_NUMBER_FORMAT ","
+
+ "\"timestamp\":%llu}",
+
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
+ instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "",
+
+ rrdset_id(st),
+ rrdset_name(st),
+ rrdset_family(st),
+ rrdset_context(st),
+ rrdset_parts_type(st),
+ rrdset_units(st),
+ rrddim_id(rd),
+ rrddim_name(rd),
+ rd->collector.last_collected_value,
+
+ (unsigned long long)rd->collector.last_collected_time.tv_sec);
+
+ if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ buffer_strcat(instance->buffer, "\n");
+ }
+
+ return 0;
+}
+
+/**
+ * Format dimension using a calculated value from stored data for JSON connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_stored_json_plaintext(struct instance *instance, RRDDIM *rd)
+{
+ RRDSET *st = rd->rrdset;
+ RRDHOST *host = st->rrdhost;
+
+ time_t last_t;
+ NETDATA_DOUBLE value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
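+    // A NAN result means no value is stored for this dimension in the requested
+    // window, so it is skipped for this iteration.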
+ if(isnan(value))
+ return 0;
+
+ if (instance->config.type == EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ if (buffer_strlen((BUFFER *)instance->buffer) > 2)
+ buffer_strcat(instance->buffer, ",\n");
+ }
+
+ buffer_sprintf(
+ instance->buffer,
+ "{"
+ "\"prefix\":\"%s\","
+ "\"hostname\":\"%s\","
+ "%s"
+
+ "\"chart_id\":\"%s\","
+ "\"chart_name\":\"%s\","
+ "\"chart_family\":\"%s\","
+        "\"chart_context\":\"%s\","
+        "\"chart_type\":\"%s\","
+        "\"units\":\"%s\","
+
+ "\"id\":\"%s\","
+ "\"name\":\"%s\","
+ "\"value\":" NETDATA_DOUBLE_FORMAT ","
+
+        "\"timestamp\":%llu}",
+
+ instance->config.prefix,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
+ instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "",
+
+ rrdset_id(st),
+ rrdset_name(st),
+ rrdset_family(st),
+ rrdset_context(st),
+ rrdset_parts_type(st),
+ rrdset_units(st),
+ rrddim_id(rd),
+ rrddim_name(rd),
+ value,
+
+ (unsigned long long)last_t);
+
+ if (instance->config.type != EXPORTING_CONNECTOR_TYPE_JSON_HTTP) {
+ buffer_strcat(instance->buffer, "\n");
+ }
+
+ return 0;
+}
+
+/**
+ * Open a JSON list for a batch
+ *
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int open_batch_json_http(struct instance *instance)
+{
+ buffer_strcat(instance->buffer, "[\n");
+
+ return 0;
+}
+
+/**
+ * Close a JSON list for a batch and update buffered bytes counter
+ *
+ * @param instance an instance data structure.
+ * @return Always returns 0.
+ */
+int close_batch_json_http(struct instance *instance)
+{
+ buffer_strcat(instance->buffer, "\n]\n");
+
+ simple_connector_end_batch(instance);
+
+ return 0;
+}
+
+/**
+ * Prepare HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void json_http_prepare_header(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
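+    // Rebuild the header on every send: Content-Length must match the payload
+    // currently held in last_buffer.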
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST /api/put HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "%s"
+ "Content-Type: application/json\r\n"
+ "Content-Length: %lu\r\n"
+ "\r\n",
+ instance->config.destination,
+ simple_connector_data->auth_string ? simple_connector_data->auth_string : "",
+ (unsigned long int) buffer_strlen(simple_connector_data->last_buffer->buffer));
+
+ return;
+}
diff --git a/exporting/json/json.h b/src/exporting/json/json.h
index d916263a9..d916263a9 100644
--- a/exporting/json/json.h
+++ b/src/exporting/json/json.h
diff --git a/src/exporting/json/metadata.yaml b/src/exporting/json/metadata.yaml
new file mode 100644
index 000000000..75abfdac3
--- /dev/null
+++ b/src/exporting/json/metadata.yaml
@@ -0,0 +1,151 @@
+# yamllint disable rule:line-length
+---
+id: 'export-json'
+meta:
+ name: 'JSON'
+ link: ''
+ categories:
+ - export
+ icon_filename: 'json.svg'
+keywords:
+ - exporter
+ - json
+overview:
+ exporter_description: |
+ Use the JSON connector for the exporting engine to archive your agent's metrics to JSON document databases for long-term storage,
+    further analysis, or correlation with data from other sources.
+ exporter_limitations: ''
+setup:
+ prerequisites:
+ list:
+ - title: ''
+ description: ''
+ configuration:
+ file:
+ name: 'exporting.conf'
+ options:
+ description: |
+ The following options can be defined for this exporter.
+ folding:
+ title: 'Config options'
+ enabled: true
+ list:
+ - name: 'enabled'
+ default_value: 'no'
+ description: 'Enables or disables an exporting connector instance (yes|no).'
+ required: true
+ - name: 'destination'
+          default_value: 'localhost:5448'
+ description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+ required: true
+ detailed_description: |
+            The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+            - PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+            - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+            - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+ Example IPv4:
+ ```yaml
+ destination = localhost:5448
+ ```
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+ - name: 'username'
+ default_value: 'my_username'
+ description: 'Username for HTTP authentication'
+ required: false
+ - name: 'password'
+ default_value: 'my_password'
+ description: 'Password for HTTP authentication'
+ required: false
+ - name: 'data source'
+ default_value: ''
+ description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
+ required: false
+ - name: 'hostname'
+ default_value: '[global].hostname'
+ description: 'The hostname to be used for sending data to the external database server.'
+ required: false
+ - name: 'prefix'
+ default_value: 'Netdata'
+ description: 'The prefix to add to all metrics.'
+ required: false
+ - name: 'update every'
+ default_value: '10'
+ description: |
+            Frequency of sending data to the external database, in seconds.
+ required: false
+ detailed_description: |
+ Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+ send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+ - name: 'buffer on failures'
+ default_value: '10'
+ description: |
+ The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
+ required: false
+ detailed_description: |
+ If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+ - name: 'timeout ms'
+ default_value: '2 * update_every * 1000'
+ description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+ required: false
+ - name: 'send hosts matching'
+ default_value: 'localhost *'
+ description: |
+ Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+ required: false
+ detailed_description: |
+ Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+ The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+ filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+ A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+ use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+ - name: 'send charts matching'
+ default_value: '*'
+ description: |
+ One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
+ required: false
+ detailed_description: |
+ A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+ use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+ positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+ has a higher priority than the configuration option.
+ - name: 'send names instead of ids'
+ default_value: ''
+ description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+ required: false
+ detailed_description: |
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+ are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+            different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+ - name: 'send configured labels'
+ default_value: ''
+ description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+ required: false
+ - name: 'send automatic labels'
+ default_value: ''
+ description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list:
+ - name: 'Basic configuration'
+ folding:
+ enabled: false
+ description: ''
+ config: |
+ [json:my_json_instance]
+ enabled = yes
+ destination = localhost:5448
+ - name: 'Configuration with HTTPS and HTTP authentication'
+ folding:
+ enabled: false
+ description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `json:https:my_json_instance`.'
+ config: |
+ [json:my_json_instance]
+ enabled = yes
+ destination = localhost:5448
+ username = my_username
+ password = my_password
diff --git a/exporting/mongodb/README.md b/src/exporting/mongodb/README.md
index a28253054..a28253054 120000
--- a/exporting/mongodb/README.md
+++ b/src/exporting/mongodb/README.md
diff --git a/src/exporting/mongodb/integrations/mongodb.md b/src/exporting/mongodb/integrations/mongodb.md
new file mode 100644
index 000000000..c32ff5ee1
--- /dev/null
+++ b/src/exporting/mongodb/integrations/mongodb.md
@@ -0,0 +1,145 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/mongodb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/mongodb/metadata.yaml"
+sidebar_label: "MongoDB"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# MongoDB
+
+
+<img src="https://netdata.cloud/img/mongodb.svg" width="150"/>
+
+
+Use the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database
+for long-term storage, further analysis, or correlation with data from other sources.
+
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Setup
+
+### Prerequisites
+
+####
+
+- To use MongoDB as external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.
+- Next, re-install Netdata from the source, which detects that the required library is now available.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | localhost | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | Netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
+
+##### buffer on failures
+
+If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+
+
+##### send charts matching
+
+A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+has a higher priority than the configuration option.
+
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+
+</details>
+
+#### Examples
+
+##### Basic configuration
+
+The default socket timeout depends on the exporting connector update interval.
+The timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
+
+
+```yaml
+[mongodb:my_instance]
+ enabled = yes
+ destination = mongodb://<hostname>
+ database = your_database_name
+ collection = your_collection_name
+
+```
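+
+As noted above, the timeout can also be overridden through the sockettimeoutms URI option; the following is only an illustrative sketch (the 10000 ms value and hostname are placeholders):
+
+```yaml
+[mongodb:my_instance]
+    enabled = yes
+    # sockettimeoutms in the URI overrides the timeout derived from 'update every'
+    destination = mongodb://localhost/?sockettimeoutms=10000
+    database = your_database_name
+    collection = your_collection_name
+```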
+
diff --git a/src/exporting/mongodb/metadata.yaml b/src/exporting/mongodb/metadata.yaml
new file mode 100644
index 000000000..87aafc02d
--- /dev/null
+++ b/src/exporting/mongodb/metadata.yaml
@@ -0,0 +1,151 @@
+# yamllint disable rule:line-length
+---
+id: 'export-mongodb'
+meta:
+ name: 'MongoDB'
+ link: 'https://www.mongodb.com/'
+ categories:
+ - export
+ icon_filename: 'mongodb.svg'
+keywords:
+ - exporter
+ - MongoDB
+overview:
+ exporter_description: |
+ Use the MongoDB connector for the exporting engine to archive your agent's metrics to a MongoDB database
+ for long-term storage, further analysis, or correlation with data from other sources.
+ exporter_limitations: ''
+setup:
+ prerequisites:
+ list:
+ - title: ''
+ description: |
+          - To use MongoDB as external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.
+ - Next, re-install Netdata from the source, which detects that the required library is now available.
+ configuration:
+ file:
+ name: 'exporting.conf'
+ options:
+ description: |
+ The following options can be defined for this exporter.
+ folding:
+ title: 'Config options'
+ enabled: true
+ list:
+ - name: 'enabled'
+ default_value: 'no'
+ description: 'Enables or disables an exporting connector instance (yes|no).'
+ required: true
+ - name: 'destination'
+ default_value: 'localhost'
+ description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+ required: true
+ detailed_description: |
+            The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+            - PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+            - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+            - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+ Example IPv4:
+ ```yaml
+ destination = 10.11.14.2:27017 10.11.14.3:4242 10.11.14.4:27017
+ ```
+ Example IPv6 and IPv4 together:
+ ```yaml
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+ ```
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+ - name: 'username'
+ default_value: 'my_username'
+ description: 'Username for HTTP authentication'
+ required: false
+ - name: 'password'
+ default_value: 'my_password'
+ description: 'Password for HTTP authentication'
+ required: false
+ - name: 'data source'
+ default_value: ''
+ description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
+ required: false
+ - name: 'hostname'
+ default_value: '[global].hostname'
+ description: 'The hostname to be used for sending data to the external database server.'
+ required: false
+ - name: 'prefix'
+ default_value: 'Netdata'
+ description: 'The prefix to add to all metrics.'
+ required: false
+ - name: 'update every'
+ default_value: '10'
+ description: |
+            Frequency of sending data to the external database, in seconds.
+ required: false
+ detailed_description: |
+ Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+ send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+ - name: 'buffer on failures'
+ default_value: '10'
+ description: |
+ The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
+ required: false
+ detailed_description: |
+ If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+ - name: 'timeout ms'
+ default_value: '2 * update_every * 1000'
+ description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+ required: false
+ - name: 'send hosts matching'
+ default_value: 'localhost *'
+ description: |
+ Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+ required: false
+ detailed_description: |
+ Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+ The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+ filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+ A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+ use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+ - name: 'send charts matching'
+ default_value: '*'
+ description: |
+ One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
+ required: false
+ detailed_description: |
+ A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+ use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+ positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+ has a higher priority than the configuration option.
+ - name: 'send names instead of ids'
+ default_value: ''
+ description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+ required: false
+ detailed_description: |
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+ are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+            different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+ - name: 'send configured labels'
+ default_value: ''
+ description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+ required: false
+ - name: 'send automatic labels'
+ default_value: ''
+ description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list:
+ - name: 'Basic configuration'
+ folding:
+ enabled: false
+ description: |
+ The default socket timeout depends on the exporting connector update interval.
+            The timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
+ config: |
+ [mongodb:my_instance]
+ enabled = yes
+ destination = mongodb://<hostname>
+ database = your_database_name
+ collection = your_collection_name
diff --git a/exporting/mongodb/mongodb.c b/src/exporting/mongodb/mongodb.c
index c65f8d4cc..1a278dcbd 100644
--- a/exporting/mongodb/mongodb.c
+++ b/src/exporting/mongodb/mongodb.c
@@ -285,6 +285,10 @@ void mongodb_connector_worker(void *instance_p)
struct mongodb_specific_data *connector_specific_data =
(struct mongodb_specific_data *)instance->connector_specific_data;
+ char threadname[ND_THREAD_TAG_MAX + 1];
+ snprintfz(threadname, ND_THREAD_TAG_MAX, "EXPMNG[%zu]", instance->index);
+ uv_thread_set_name_np(threadname);
+
while (!instance->engine->exit) {
struct stats *stats = &instance->stats;
diff --git a/exporting/mongodb/mongodb.h b/src/exporting/mongodb/mongodb.h
index f1867b288..f1867b288 100644
--- a/exporting/mongodb/mongodb.h
+++ b/src/exporting/mongodb/mongodb.h
diff --git a/exporting/nc-exporting.sh b/src/exporting/nc-exporting.sh
index 740f65d18..740f65d18 100755
--- a/exporting/nc-exporting.sh
+++ b/src/exporting/nc-exporting.sh
diff --git a/exporting/opentsdb/README.md b/src/exporting/opentsdb/README.md
index fef534603..fef534603 120000
--- a/exporting/opentsdb/README.md
+++ b/src/exporting/opentsdb/README.md
diff --git a/exporting/opentsdb/integrations/opentsdb.md b/src/exporting/opentsdb/integrations/opentsdb.md
index 1c3087e0e..ddf8cdf25 100644
--- a/exporting/opentsdb/integrations/opentsdb.md
+++ b/src/exporting/opentsdb/integrations/opentsdb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/opentsdb/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/opentsdb/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/opentsdb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/opentsdb/metadata.yaml"
sidebar_label: "OpenTSDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -38,7 +38,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -48,7 +48,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -62,7 +62,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/opentsdb/metadata.yaml b/src/exporting/opentsdb/metadata.yaml
new file mode 100644
index 000000000..c86ae9cae
--- /dev/null
+++ b/src/exporting/opentsdb/metadata.yaml
@@ -0,0 +1,176 @@
+# yamllint disable rule:line-length
+---
+id: 'export-opentsdb'
+meta:
+ name: 'OpenTSDB'
+ link: 'https://github.com/OpenTSDB/opentsdb'
+ categories:
+ - export
+ icon_filename: 'opentsdb.png'
+keywords:
+ - exporter
+ - OpenTSDB
+ - scalable time series
+overview:
+ exporter_description: |
+ Use the OpenTSDB connector for the exporting engine to archive your Netdata metrics to OpenTSDB databases for long-term storage,
+ further analysis, or correlation with data from other sources.
+ exporter_limitations: ''
+setup:
+ prerequisites:
+ list:
+ - title: ''
+ description: |
+ - OpenTSDB and Netdata, installed, configured and operational.
+ configuration:
+ file:
+ name: 'exporting.conf'
+ options:
+ description: 'The following options can be defined for this exporter.'
+ folding:
+ title: 'Config options'
+ enabled: true
+ list:
+ - name: 'enabled'
+ default_value: 'no'
+ description: 'Enables or disables an exporting connector instance (yes|no).'
+ required: true
+ - name: 'destination'
+ default_value: 'no'
+ description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+ required: true
+ detailed_description: |
+            The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+            - PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+            - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+            - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used (opentsdb = 4242).
+
+ Example IPv4:
+ ```yaml
+ destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
+ ```
+ Example IPv6 and IPv4 together:
+ ```yaml
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+ ```
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+ - name: 'username'
+ default_value: 'my_username'
+ description: 'Username for HTTP authentication'
+ required: false
+ - name: 'password'
+ default_value: 'my_password'
+ description: 'Password for HTTP authentication'
+ required: false
+ - name: 'data source'
+ default_value: ''
+ description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
+ required: false
+ - name: 'hostname'
+ default_value: '[global].hostname'
+ description: 'The hostname to be used for sending data to the external database server.'
+ required: false
+ - name: 'prefix'
+ default_value: 'Netdata'
+ description: 'The prefix to add to all metrics.'
+ required: false
+ - name: 'update every'
+ default_value: '10'
+ description: |
+            Frequency of sending data to the external database, in seconds.
+ required: false
+ detailed_description: |
+ Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+ send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+ - name: 'buffer on failures'
+ default_value: '10'
+ description: |
+ The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
+ required: false
+ detailed_description: |
+ If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+ - name: 'timeout ms'
+ default_value: '2 * update_every * 1000'
+ description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+ required: false
+ - name: 'send hosts matching'
+ default_value: 'localhost *'
+ description: |
+ Hosts filter. Determines which hosts will be sent to OpenTSDB. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+ required: false
+ detailed_description: |
+ Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+ The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+ filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+ A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+ use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+ - name: 'send charts matching'
+ default_value: '*'
+ description: |
+ One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
+ required: false
+ detailed_description: |
+ A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+ use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+ positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+ has a higher priority than the configuration option.
+ - name: 'send names instead of ids'
+ default_value: ''
+ description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+ required: false
+ detailed_description: |
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+ are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+ different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+ - name: 'send configured labels'
+ default_value: ''
+ description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+ required: false
+ - name: 'send automatic labels'
+ default_value: ''
+ description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list:
+ - name: 'Minimal configuration'
+ folding:
+ enabled: false
+ description: |
+ Add `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plaintext.
+ For example: `opentsdb:http:my_opentsdb_instance`, `opentsdb:https:my_opentsdb_instance`.
+ config: |
+ [opentsdb:my_opentsdb_instance]
+ enabled = yes
+ destination = localhost:4242
+ - name: 'HTTP authentication'
+ folding:
+ enabled: false
+ description: ''
+ config: |
+ [opentsdb:my_opentsdb_instance]
+ enabled = yes
+ destination = localhost:4242
+ username = my_username
+ password = my_password
+ - name: 'Using `send hosts matching`'
+ folding:
+ enabled: false
+ description: ''
+ config: |
+ [opentsdb:my_opentsdb_instance]
+ enabled = yes
+ destination = localhost:4242
+ send hosts matching = localhost *
+ - name: 'Using `send charts matching`'
+ folding:
+ enabled: false
+ description: ''
+ config: |
+ [opentsdb:my_opentsdb_instance]
+ enabled = yes
+ destination = localhost:4242
+ send charts matching = *
diff --git a/exporting/opentsdb/opentsdb.c b/src/exporting/opentsdb/opentsdb.c
index ffccb5b22..ab4495cb2 100644
--- a/exporting/opentsdb/opentsdb.c
+++ b/src/exporting/opentsdb/opentsdb.c
@@ -127,7 +127,7 @@ int init_opentsdb_http_instance(struct instance *instance)
void sanitize_opentsdb_label_value(char *dst, const char *src, size_t len)
{
while (*src != '\0' && len) {
- if (isalpha(*src) || isdigit(*src) || *src == '-' || *src == '.' || *src == '/' || IS_UTF8_BYTE(*src))
+ if (isalpha((uint8_t)*src) || isdigit((uint8_t)*src) || *src == '-' || *src == '.' || *src == '/' || IS_UTF8_BYTE(*src))
*dst++ = *src;
else
*dst++ = '_';
@@ -186,15 +186,13 @@ int format_dimension_collected_opentsdb_telnet(struct instance *instance, RRDDIM
buffer_sprintf(
instance->buffer,
- "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s%s%s\n",
+ "put %s.%s.%s %llu " COLLECTED_NUMBER_FORMAT " host=%s%s\n",
instance->config.prefix,
chart_name,
dimension_name,
(unsigned long long)rd->collector.last_collected_time.tv_sec,
rd->collector.last_collected_value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "");
return 0;
@@ -232,15 +230,13 @@ int format_dimension_stored_opentsdb_telnet(struct instance *instance, RRDDIM *r
buffer_sprintf(
instance->buffer,
- "put %s.%s.%s %llu " NETDATA_DOUBLE_FORMAT " host=%s%s%s%s\n",
+ "put %s.%s.%s %llu " NETDATA_DOUBLE_FORMAT " host=%s%s\n",
instance->config.prefix,
chart_name,
dimension_name,
(unsigned long long)last_t,
value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
(instance->labels_buffer) ? buffer_tostring(instance->labels_buffer) : "");
return 0;
@@ -326,7 +322,7 @@ int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *
"\"timestamp\":%llu,"
"\"value\":"COLLECTED_NUMBER_FORMAT","
"\"tags\":{"
- "\"host\":\"%s%s%s\"%s"
+ "\"host\":\"%s\"%s"
"}"
"}",
instance->config.prefix,
@@ -335,8 +331,6 @@ int format_dimension_collected_opentsdb_http(struct instance *instance, RRDDIM *
(unsigned long long)rd->collector.last_collected_time.tv_sec,
rd->collector.last_collected_value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "");
return 0;
@@ -382,7 +376,7 @@ int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd)
"\"timestamp\":%llu,"
"\"value\":" NETDATA_DOUBLE_FORMAT ","
"\"tags\":{"
- "\"host\":\"%s%s%s\"%s"
+ "\"host\":\"%s\"%s"
"}"
"}",
instance->config.prefix,
@@ -391,8 +385,6 @@ int format_dimension_stored_opentsdb_http(struct instance *instance, RRDDIM *rd)
(unsigned long long)last_t,
value,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- (host->tags) ? " " : "",
- (host->tags) ? rrdhost_tags(host) : "",
instance->labels_buffer ? buffer_tostring(instance->labels_buffer) : "");
return 0;
diff --git a/exporting/opentsdb/opentsdb.h b/src/exporting/opentsdb/opentsdb.h
index b544ba8c1..b544ba8c1 100644
--- a/exporting/opentsdb/opentsdb.h
+++ b/src/exporting/opentsdb/opentsdb.h
diff --git a/exporting/process_data.c b/src/exporting/process_data.c
index c7792fa55..1c7eaa192 100644
--- a/exporting/process_data.c
+++ b/src/exporting/process_data.c
@@ -77,8 +77,8 @@ NETDATA_DOUBLE exporting_calculate_value_from_stored_data(
time_t before = instance->before;
// find the edges of the rrd database for this chart
- time_t first_t = storage_engine_oldest_time_s(rd->tiers[0].backend, rd->tiers[0].db_metric_handle);
- time_t last_t = storage_engine_latest_time_s(rd->tiers[0].backend, rd->tiers[0].db_metric_handle);
+ time_t first_t = storage_engine_oldest_time_s(rd->tiers[0].seb, rd->tiers[0].smh);
+ time_t last_t = storage_engine_latest_time_s(rd->tiers[0].seb, rd->tiers[0].smh);
time_t update_every = st->update_every;
struct storage_engine_query_handle handle;
@@ -126,7 +126,7 @@ NETDATA_DOUBLE exporting_calculate_value_from_stored_data(
size_t counter = 0;
NETDATA_DOUBLE sum = 0;
- for (storage_engine_query_init(rd->tiers[0].backend, rd->tiers[0].db_metric_handle, &handle, after, before, STORAGE_PRIORITY_SYNCHRONOUS); !storage_engine_query_is_finished(&handle);) {
+ for (storage_engine_query_init(rd->tiers[0].seb, rd->tiers[0].smh, &handle, after, before, STORAGE_PRIORITY_SYNCHRONOUS); !storage_engine_query_is_finished(&handle);) {
STORAGE_POINT sp = storage_engine_query_next_metric(&handle);
points_read++;
@@ -336,7 +336,6 @@ void end_batch_formatting(struct engine *engine)
*/
void prepare_buffers(struct engine *engine)
{
- netdata_thread_disable_cancelability();
start_batch_formatting(engine);
rrd_rdlock();
@@ -358,8 +357,7 @@ void prepare_buffers(struct engine *engine)
variables_formatting(engine, host);
end_host_formatting(engine, host);
}
- rrd_unlock();
- netdata_thread_enable_cancelability();
+ rrd_rdunlock();
end_batch_formatting(engine);
}
diff --git a/src/exporting/prometheus/README.md b/src/exporting/prometheus/README.md
new file mode 100644
index 000000000..81e62b7ec
--- /dev/null
+++ b/src/exporting/prometheus/README.md
@@ -0,0 +1,361 @@
+# Using Netdata with Prometheus
+
+Netdata supports exporting metrics to Prometheus in two ways:
+
+ - You can [configure Prometheus to scrape Netdata metrics](#configure-prometheus-to-scrape-netdata-metrics).
+
+ - You can [configure Netdata to push metrics to Prometheus](/src/exporting/prometheus/remote_write/README.md), using the Prometheus remote write API.
+
+## Netdata support for Prometheus
+
+Regardless of the methodology, you first need to understand how Netdata structures the metrics it exports to Prometheus
+and the capabilities it provides. The examples provided in this document assume that you will be using Netdata as
+a metrics endpoint, but the concepts apply as well to the remote write API method.
+
+### Understanding Netdata metrics
+
+#### Charts
+
+Each chart in Netdata has several properties (common to all its metrics):
+
+- `chart_id` - uniquely identifies a chart.
+
+- `chart_name` - a more human friendly name for `chart_id`, also unique.
+
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts
+ have the same context, etc. This is used for alert templates to match all the charts they should be attached to.
+
+- `family` groups a set of charts together. It is used as the submenu of the dashboard.
+
+- `units` is the units for all the metrics attached to the chart.
+
+#### Dimensions
+
+Then each Netdata chart contains metrics called `dimensions`. All the dimensions of a chart have the same units of
+measurement, and are contextually in the same category (i.e. the metrics for disk bandwidth are `read` and `write`,
+and they are both in the same chart).
+
+### Netdata data source
+
+Netdata can send metrics to Prometheus from 3 data sources:
+
+- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is
+  done by Netdata. The latest value for each metric is just given to Prometheus. This is the method Prometheus prefers,
+  but it is also the hardest to work with. To work with this data source, you will need to understand how to get
+  meaningful values out of the raw counters (see the PromQL sketch after this list).
+
+ The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+  If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended to the context.
+
+ Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
+  (`multiplier` and `divisor`). In the case that the dimensions of a chart are heterogeneous, Netdata will use this
+ format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+
+- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on
+ the Netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the Netdata
+ dashboard charts. This is the easiest to work with.
+
+ The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+
+ When this source is used, Netdata keeps track of the last access time for each Prometheus server fetching the
+  metrics. This last access time is used in subsequent queries by the same Prometheus server to identify the
+  time-frame over which the `average` will be calculated.
+
+ So, no matter how frequently Prometheus scrapes Netdata, it will get all the database data.
+ To identify each Prometheus server, Netdata uses by default the IP of the client fetching the metrics.
+
+ If there are multiple Prometheus servers fetching data from the same Netdata, using the same IP, each Prometheus
+ server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the Prometheus server.
+
+- `sum` or `volume` is like `average`, but instead of averaging the values, it sums them.
+
+ The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
+  other operations are the same as with `average`.
+
+ To change the data source to `sum` or `as-collected` you need to provide the `source` parameter in the request URL.
+ e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`
+
+ Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
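+
+For the `as collected` source, the exported values are raw counters, so you typically wrap them in a PromQL function
+such as `rate()` to get meaningful per-second values. A minimal sketch, using the `netdata_system_cpu_total` metric
+shown later in this document:
+
+```promql
+rate(netdata_system_cpu_total{dimension="user"}[1m])
+```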
+
+### Querying Metrics
+
+Fetch this URL with your web browser:
+
+`http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes`
+
+_(replace `your.netdata.ip` with the IP or hostname of your Netdata server)_
+
+Netdata will respond with all the metrics it sends to Prometheus.
+
+If you search that page for `"system.cpu"` you will find all the metrics Netdata is exporting to Prometheus for this
+chart. `system.cpu` is the chart name on the Netdata dashboard (on the Netdata dashboard all charts have a text heading
+such as: `Total CPU utilization (system.cpu)`. What we are interested in here is the chart name: `system.cpu`).
+
+Searching for `"system.cpu"` reveals:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest_nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest_nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "guest", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="guest"} 1.7837326 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "steal", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="steal"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "softirq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="softirq"} 0.5275442 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "irq", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="irq"} 0.2260836 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "user", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="user"} 2.3362762 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 1.7961062 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "nice", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="nice"} 0.0000000 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "iowait", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="iowait"} 0.9671802 1500066662000
+# COMMENT netdata_system_cpu_percentage_average: dimension "idle", value is percentage, gauge, dt 1500066653 to 1500066662 inclusive
+netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="idle"} 92.3630770 1500066662000
+```
+
+_(Netdata response for `system.cpu` with source=`average`)_
+
+In `average` or `sum` data sources, all values are normalized and are reported to Prometheus as gauges. Now, use the
+'expression' text form in Prometheus. Begin to type the metric we are looking for: `netdata_system_cpu`. You should see
+that the text form begins to auto-fill as Prometheus knows about this metric.
+
+If the data source was `as collected`, the response would be:
+
+```sh
+# COMMENT homogeneous chart "system.cpu", context "system.cpu", family "cpu", units "percentage"
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest_nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest_nice"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "guest", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="guest"} 63945 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "steal", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="steal"} 0 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "softirq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="softirq"} 8295 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "irq", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="irq"} 4079 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "user", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="user"} 116488 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "system", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="system"} 35084 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "nice", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="nice"} 505 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "iowait", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="iowait"} 23314 1500066716438
+# COMMENT netdata_system_cpu_total: chart "system.cpu", context "system.cpu", family "cpu", dimension "idle", value * 1 / 1 delta gives percentage (counter)
+netdata_system_cpu_total{chart="system.cpu",family="cpu",dimension="idle"} 918470 1500066716438
+```
+
+_(Netdata response for `system.cpu` with source=`as-collected`)_
+
+For more information, check the Prometheus documentation.
+
+### Streaming data from upstream hosts
+
+The `format=prometheus` parameter only exports the host's Netdata metrics. If you are using the parent-child
+functionality of Netdata, this ignores any upstream hosts, so you should consider using the following in your
+**prometheus.yml**:
+
+```yaml
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ format: [ prometheus_all_hosts ]
+ honor_labels: true
+```
+
+This will report all upstream host data, and `honor_labels` will make Prometheus take note of the instance names
+provided.
+
+### Timestamps
+
+To pass the metrics through the Prometheus Pushgateway, Netdata supports the option `&timestamps=no` to send the metrics
+without timestamps.
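+
+For example, a minimal sketch that relays one scrape to a Pushgateway with `curl` (the Pushgateway host and the
+`netdata` job name here are hypothetical):
+
+```sh
+# fetch the metrics without timestamps and push them to a Pushgateway
+curl -s "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&timestamps=no" |
+  curl -s --data-binary @- "http://your.pushgateway:9091/metrics/job/netdata"
+```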
+
+## Netdata host variables
+
+Netdata collects various system configuration metrics, like the max number of TCP sockets supported, the max number of
+files allowed system-wide, various IPC sizes, etc. These metrics are not exposed to Prometheus by default.
+
+To expose them, append `variables=yes` to the Netdata URL.
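+
+For example, using the same endpoint as above:
+
+```sh
+curl "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&variables=yes"
+```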
+
+### TYPE and HELP
+
+To save bandwidth, and because Prometheus does not use them anyway, `# TYPE` and `# HELP` lines are suppressed. If
+wanted, they can be re-enabled via `types=yes` and `help=yes`, e.g.
+`/api/v1/allmetrics?format=prometheus&types=yes&help=yes`
+
+Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against
+the Prometheus
+documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).
+
+### Names and IDs
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and
+names are human friendly labels (also unique).
+
+Most charts and metrics have the same ID and name, but in several cases they are different: disks with device-mapper,
+interrupts, QoS classes, statsd synthetic charts, etc.
+
+The default is controlled in `exporting.conf`:
+
+```conf
+[prometheus:exporter]
+ send names instead of ids = yes | no
+```
+
+You can override it from Prometheus by appending to the URL (see the example below):
+
+- `&names=no` to get IDs (the old behaviour)
+- `&names=yes` to get names
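+
+For example, to fetch names explicitly:
+
+```sh
+curl "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&names=yes"
+```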
+
+### Filtering metrics sent to Prometheus
+
+Netdata can filter the metrics it sends to Prometheus with this setting:
+
+```conf
+[prometheus:exporter]
+ send charts matching = *
+```
+
+This setting accepts a space separated list
+of [simple patterns](/src/libnetdata/simple_pattern/README.md) to match the
+**charts** to be sent to Prometheus. Each pattern can use `*` as wildcard, any number of times (e.g. `*a*b*c*` is valid).
+Patterns starting with `!` give a negative match (e.g. `!*.bad users.* groups.*` will send all the users and groups
+except the `bad` user and `bad` group). The order is important: the first match (positive or negative), left to right,
+is used.
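+
+For example, a sketch of the negative-match filter described above, in `exporting.conf`:
+
+```conf
+[prometheus:exporter]
+ send charts matching = !*.bad users.* groups.*
+```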
+
+### Changing the prefix of Netdata metrics
+
+Netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
+
+```conf
+[prometheus:exporter]
+ prefix = netdata
+```
+
+It can also be changed from the URL, by appending `&prefix=netdata`.
+
+### Metric Units
+
+The default source `average` adds the unit of measurement to the name of each metric (e.g. `_KiB_persec`). To hide the
+units and get the same metric names as with the other sources, append to the URL `&hideunits=yes`.
+
+The units were standardized in v1.12, with the effect of changing the metric names. To get the metric names as they were
+before v1.12, append to the URL `&oldunits=yes`.
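+
+For example, to hide the units:
+
+```sh
+curl "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&hideunits=yes"
+```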
+
+### Accuracy of `average` and `sum` data sources
+
+When the data source is set to `average` or `sum`, Netdata remembers the last access of each client accessing Prometheus
+metrics and uses this last access time to respond with the `average` or `sum` of all the entries in the database since
+then. This means that Prometheus servers are not losing data when they access Netdata with data source = `average` or
+`sum`.
+
+To uniquely identify each Prometheus server, Netdata uses the IP of the client accessing the metrics. If, however, the
+IP is not good enough for identifying a single Prometheus server (e.g. when Prometheus servers are accessing Netdata
+through a web proxy, or when multiple Prometheus servers are NATed to a single IP), each Prometheus may append
+`&server=NAME` to the URL. This `NAME` is used by Netdata to uniquely identify each Prometheus server and keep track of
+its last access time.
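+
+For example, a scrape URL identifying the client as `prometheus1` (a hypothetical server name):
+
+```sh
+curl "http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&server=prometheus1"
+```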
+
+## Configure Prometheus to scrape Netdata metrics
+
+The following `prometheus.yml` file will scrape all Netdata metrics "as collected".
+
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
+
+```yaml
+# my global config
+global:
+ scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
+ evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+ # Attach these labels to any time series or alerts when communicating with
+ # external systems (federation, remote storage, Alertmanager).
+ external_labels:
+ monitor: 'codelab-monitor'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+# - "first.rules"
+# - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: [ '0.0.0.0:9090' ]
+
+ - job_name: 'netdata-scrape'
+
+ metrics_path: '/api/v1/allmetrics'
+ params:
+ # format: prometheus | prometheus_all_hosts
+ # You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
+ format: [ prometheus ]
+ #
+ # sources: as-collected | raw | average | sum | volume
+ # default is: average
+ #source: [as-collected]
+ #
+ # server name for this prometheus - the default is the client IP
+ # for Netdata to uniquely identify it
+ #server: ['prometheus1']
+ honor_labels: true
+
+ static_configs:
+ - targets: [ '{your.netdata.ip}:19999' ]
+```
+
+### Prometheus alerts for Netdata metrics
+
+The following is an example of a `nodes.yml` file that will allow Prometheus to generate alerts from some Netdata sources.
+Save it at `/opt/prometheus/nodes.yml`, and add a _- "nodes.yml"_ entry under the _rule_files:_ section in the example `prometheus.yml` file above.
+
+```yaml
+groups:
+ - name: nodes
+
+ rules:
+ - alert: node_high_cpu_usage_70
+ expr: sum(sum_over_time(netdata_system_cpu_percentage_average{dimension=~"(user|system|softirq|irq|guest)"}[10m])) by (job) / sum(count_over_time(netdata_system_cpu_percentage_average{dimension="idle"}[10m])) by (job) > 70
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
+ summary: CPU alert for container node '{{ $labels.job }}'
+
+ - alert: node_high_memory_usage_70
+ expr: 100 / sum(netdata_system_ram_MB_average) by (job)
+ * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
+ summary: Memory alert for container node '{{ $labels.job }}'
+
+ - alert: node_low_root_filesystem_space_20
+ expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
+ * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
+ summary: Root filesystem alert for container node '{{ $labels.job }}'
+
+ - alert: node_root_filesystem_fill_rate_6h
+ expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
+ for: 1h
+ labels:
+ severity: critical
+ annotations:
+ description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
+ summary: Disk fill alert for Swarm node '{{ $labels.job }}'
+```
diff --git a/exporting/prometheus/integrations/appoptics.md b/src/exporting/prometheus/integrations/appoptics.md
index 292933200..73ed5c843 100644
--- a/exporting/prometheus/integrations/appoptics.md
+++ b/src/exporting/prometheus/integrations/appoptics.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/appoptics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/appoptics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "AppOptics"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/azure_data_explorer.md b/src/exporting/prometheus/integrations/azure_data_explorer.md
index aa8710aae..8acbef88a 100644
--- a/exporting/prometheus/integrations/azure_data_explorer.md
+++ b/src/exporting/prometheus/integrations/azure_data_explorer.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/azure_data_explorer.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/azure_data_explorer.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Azure Data Explorer"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/azure_event_hub.md b/src/exporting/prometheus/integrations/azure_event_hub.md
index bc8a0c9e1..42e2a0515 100644
--- a/exporting/prometheus/integrations/azure_event_hub.md
+++ b/src/exporting/prometheus/integrations/azure_event_hub.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/azure_event_hub.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/azure_event_hub.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Azure Event Hub"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/chronix.md b/src/exporting/prometheus/integrations/chronix.md
index 9794a624c..c7d315b79 100644
--- a/exporting/prometheus/integrations/chronix.md
+++ b/src/exporting/prometheus/integrations/chronix.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/chronix.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/chronix.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Chronix"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/cortex.md b/src/exporting/prometheus/integrations/cortex.md
index 784c62ce2..91fe3946d 100644
--- a/exporting/prometheus/integrations/cortex.md
+++ b/src/exporting/prometheus/integrations/cortex.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/cortex.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/cortex.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Cortex"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/cratedb.md b/src/exporting/prometheus/integrations/cratedb.md
index 75a46391d..87f30bc79 100644
--- a/exporting/prometheus/integrations/cratedb.md
+++ b/src/exporting/prometheus/integrations/cratedb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/cratedb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/cratedb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "CrateDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/prometheus/integrations/elasticsearch.md b/src/exporting/prometheus/integrations/elasticsearch.md
new file mode 100644
index 000000000..42fac5f67
--- /dev/null
+++ b/src/exporting/prometheus/integrations/elasticsearch.md
@@ -0,0 +1,158 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/elasticsearch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
+sidebar_label: "ElasticSearch"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# ElasticSearch
+
+
+<img src="https://netdata.cloud/img/elasticsearch.svg" width="150"/>
+
+
+Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Limitations
+
+The remote write exporting connector does not support the `buffer on failures` option.
+
+
+## Setup
+
+### Prerequisites
+
+####
+
+- Netdata and the external storage provider of your choice, installed, configured and operational.
+- `protobuf` and `snappy` libraries installed.
+- Netdata reinstalled after installing the libraries.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
+
+##### buffer on failures
+
+If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+
+
+##### send charts matching
+
+A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+has a higher priority than the configuration option.
+
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic example configuration for Prometheus remote write.
+
+```yaml
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+
+```
+##### Example configuration with HTTPS and HTTP authentication
+
+Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.
+
+```yaml
+[prometheus_remote_write:https:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ username = my_username
+ password = my_password
+
+```
+
diff --git a/exporting/prometheus/integrations/gnocchi.md b/src/exporting/prometheus/integrations/gnocchi.md
index a61986c19..457adefc8 100644
--- a/exporting/prometheus/integrations/gnocchi.md
+++ b/src/exporting/prometheus/integrations/gnocchi.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/gnocchi.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/gnocchi.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Gnocchi"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/google_bigquery.md b/src/exporting/prometheus/integrations/google_bigquery.md
index aec0a9a5b..c9cb54cc7 100644
--- a/exporting/prometheus/integrations/google_bigquery.md
+++ b/src/exporting/prometheus/integrations/google_bigquery.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/google_bigquery.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/google_bigquery.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Google BigQuery"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/prometheus/integrations/greptimedb.md b/src/exporting/prometheus/integrations/greptimedb.md
new file mode 100644
index 000000000..cf1453eeb
--- /dev/null
+++ b/src/exporting/prometheus/integrations/greptimedb.md
@@ -0,0 +1,158 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/greptimedb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
+sidebar_label: "GreptimeDB"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# GreptimeDB
+
+
+<img src="https://netdata.cloud/img/greptimedb.png" width="150"/>
+
+
+Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Limitations
+
+The remote write exporting connector does not support buffer on failures.
+
+
+## Setup
+
+### Prerequisites
+
+####
+
+- Netdata and the external storage provider of your choice, installed, configured and operational.
+- `protobuf` and `snappy` libraries installed.
+- Netdata reinstalled after installing the libraries.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
+
+##### buffer on failures
+
+If the server fails to receive the data for more than this number of iterations, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (localhost is always checked as localhost), allowing you to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+
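+A minimal sketch of this filter in `exporting.conf`, reusing the pattern above (the instance name is hypothetical):
+
+```yaml
+[prometheus_remote_write:my_instance]
+    # first match wins: exclude hosts containing "child", then accept hosts containing "db"
+    send hosts matching = !*child* *db*
+```
+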
+
+##### send charts matching
+
+A pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,
+use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
+parameter has a higher priority than the configuration option.
+
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic example configuration for Prometheus remote write.
+
+```yaml
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+
+```
+##### Example configuration with HTTPS and HTTP authentication
+
+Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.
+
+```yaml
+[prometheus_remote_write:https:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ username = my_username
+ password = my_password
+
+```
+
diff --git a/exporting/prometheus/integrations/irondb.md b/src/exporting/prometheus/integrations/irondb.md
index 450f88339..6ab7c8f06 100644
--- a/exporting/prometheus/integrations/irondb.md
+++ b/src/exporting/prometheus/integrations/irondb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/irondb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/irondb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "IRONdb"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/prometheus/integrations/kafka.md b/src/exporting/prometheus/integrations/kafka.md
new file mode 100644
index 000000000..207f292ff
--- /dev/null
+++ b/src/exporting/prometheus/integrations/kafka.md
@@ -0,0 +1,158 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/kafka.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
+sidebar_label: "Kafka"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# Kafka
+
+
+<img src="https://netdata.cloud/img/kafka.svg" width="150"/>
+
+
+Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Limitations
+
+The remote write exporting connector does not support buffer on failures.
+
+
+## Setup
+
+### Prerequisites
+
+####
+
+- Netdata and the external storage provider of your choice, installed, configured and operational.
+- `protobuf` and `snappy` libraries installed.
+- Netdata reinstalled after installing the libraries.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
+
+##### buffer on failures
+
+If the server fails to receive the data for more than this number of iterations, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (localhost is always checked as localhost), allowing you to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+
+
+##### send charts matching
+
+A pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,
+use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
+parameter has a higher priority than the configuration option.
+
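+As a sketch (the instance name is hypothetical), the pattern above translates to `exporting.conf` like this:
+
+```yaml
+[prometheus_remote_write:my_instance]
+    # first match wins: drop charts ending in "reads", then accept the remaining apps.* charts
+    send charts matching = !*reads apps.*
+```
+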
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic example configuration for Prometheus remote write.
+
+```yaml
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+
+```
+##### Example configuration with HTTPS and HTTP authentication
+
+Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.
+
+```yaml
+[prometheus_remote_write:https:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ username = my_username
+ password = my_password
+
+```
+
diff --git a/exporting/prometheus/integrations/m3db.md b/src/exporting/prometheus/integrations/m3db.md
index 689e8e851..75ff05b5d 100644
--- a/exporting/prometheus/integrations/m3db.md
+++ b/src/exporting/prometheus/integrations/m3db.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/m3db.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/m3db.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "M3DB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/metricfire.md b/src/exporting/prometheus/integrations/metricfire.md
index 2d69e33f6..8e8797ca9 100644
--- a/exporting/prometheus/integrations/metricfire.md
+++ b/src/exporting/prometheus/integrations/metricfire.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/metricfire.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/metricfire.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "MetricFire"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/prometheus/integrations/new_relic.md b/src/exporting/prometheus/integrations/new_relic.md
new file mode 100644
index 000000000..7ecedd497
--- /dev/null
+++ b/src/exporting/prometheus/integrations/new_relic.md
@@ -0,0 +1,158 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/new_relic.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
+sidebar_label: "New Relic"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# New Relic
+
+
+<img src="https://netdata.cloud/img/newrelic.svg" width="150"/>
+
+
+Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Limitations
+
+The remote write exporting connector does not support buffer on failures.
+
+
+## Setup
+
+### Prerequisites
+
+####
+
+- Netdata and the external storage provider of your choice, installed, configured and operational.
+- `protobuf` and `snappy` libraries installed.
+- Netdata reinstalled after installing the libraries.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
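+As an illustrative sketch (the value and instance name are hypothetical), raising the interval lowers the load on the remote end at the cost of resolution:
+
+```yaml
+[prometheus_remote_write:my_instance]
+    # send one batch per minute instead of the 10-second default
+    update every = 60
+```
+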
+
+##### buffer on failures
+
+If the server fails to receive the data for more than this number of iterations, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (localhost is always checked as localhost), allowing you to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+
+
+##### send charts matching
+
+A pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,
+use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
+parameter has a higher priority than the configuration option.
+
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic example configuration for Prometheus remote write.
+
+```yaml
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+
+```
+##### Example configuration with HTTPS and HTTP authentication
+
+Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.
+
+```yaml
+[prometheus_remote_write:https:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ username = my_username
+ password = my_password
+
+```
+
diff --git a/src/exporting/prometheus/integrations/opeansearch.md b/src/exporting/prometheus/integrations/opeansearch.md
new file mode 100644
index 000000000..77c494284
--- /dev/null
+++ b/src/exporting/prometheus/integrations/opeansearch.md
@@ -0,0 +1,158 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/opeansearch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
+sidebar_label: "OpeanSearch"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# OpenSearch
+
+
+<img src="https://netdata.cloud/img/opensearch.svg" width="150"/>
+
+
+Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Limitations
+
+The remote write exporting connector does not support buffer on failures.
+
+
+## Setup
+
+### Prerequisites
+
+####
+
+- Netdata and the external storage provider of your choice, installed, configured and operational.
+- `protobuf` and `snappy` libraries installed.
+- Netdata reinstalled after installing the libraries.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
+
+##### buffer on failures
+
+If the server fails to receive the data for more than this number of iterations, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (localhost is always checked as localhost), allowing you to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+
+
+##### send charts matching
+
+A pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,
+use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
+parameter has a higher priority than the configuration option.
+
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
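+A minimal sketch of enabling names (the instance name is hypothetical):
+
+```yaml
+[prometheus_remote_write:my_instance]
+    # export a device-mapper volume under its mapped name rather than an id like dm-0
+    send names instead of ids = yes
+```
+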
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic example configuration for Prometheus remote write.
+
+```yaml
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+
+```
+##### Example configuration with HTTPS and HTTP authentication
+
+Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.
+
+```yaml
+[prometheus_remote_write:https:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ username = my_username
+ password = my_password
+
+```
+
diff --git a/src/exporting/prometheus/integrations/postgresql.md b/src/exporting/prometheus/integrations/postgresql.md
new file mode 100644
index 000000000..4a899b5d4
--- /dev/null
+++ b/src/exporting/prometheus/integrations/postgresql.md
@@ -0,0 +1,158 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/postgresql.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
+sidebar_label: "PostgreSQL"
+learn_status: "Published"
+learn_rel_path: "Exporting Metrics"
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
+endmeta-->
+
+# PostgreSQL
+
+
+<img src="https://netdata.cloud/img/postgres.svg" width="150"/>
+
+
+Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
+
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Limitations
+
+The remote write exporting connector does not support buffer on failures.
+
+
+## Setup
+
+### Prerequisites
+
+####
+
+- Netdata and the external storage provider of your choice, installed, configured and operational.
+- `protobuf` and `snappy` libraries installed.
+- Netdata reinstalled after installing the libraries (see the sketch below).
+
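+On Debian/Ubuntu-based systems (an assumption; package names differ across distributions), the libraries could be installed like this:
+
+```bash
+# development headers for protobuf and snappy, assumed package names on Debian/Ubuntu
+sudo apt-get install libprotobuf-dev libsnappy-dev protobuf-compiler
+```
+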
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `exporting.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config exporting.conf
+```
+#### Options
+
+The following options can be defined for this exporter.
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
+| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
+| username | Username for HTTP authentication | my_username | no |
+| password | Password for HTTP authentication | my_password | no |
+| data source | Selects the kind of data that will be sent to the external database (as collected/average/sum). | | no |
+| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
+| prefix | The prefix to add to all metrics. | netdata | no |
+| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
+| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
+| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
+| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
+| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
+| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
+
+##### destination
+
+The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+- PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you need to enclose the IP in [] to separate it from the port.
+- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+Example IPv4:
+```yaml
+destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+```
+Example IPv6 and IPv4 together:
+```yaml
+destination = [ffff:...:0001]:2003 10.11.12.1:2003
+```
+When multiple servers are defined, Netdata will try the next one when the previous one fails.
+
+
+##### update every
+
+Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+
+
+##### buffer on failures
+
+If the server fails to receive the data for more than this number of iterations, data loss on the connector instance is expected (Netdata will also log it).
+
+
+##### send hosts matching
+
+Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+The patterns are checked against the hostname (localhost is always checked as localhost), allowing you to
+filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
+
+
+##### send charts matching
+
+A pattern starting with `!` gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`,
+use `!*reads apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
+parameter has a higher priority than the configuration option.
+
+
+##### send names instead of ids
+
+Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+are human-friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+
+
+</details>
+
+#### Examples
+
+##### Example configuration
+
+Basic example configuration for Prometheus remote write.
+
+```yaml
+[prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+
+```
+##### Example configuration with HTTPS and HTTP authentication
+
+Add the `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.
+
+```yaml
+[prometheus_remote_write:https:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ username = my_username
+ password = my_password
+
+```
+
diff --git a/exporting/prometheus/integrations/prometheus_remote_write.md b/src/exporting/prometheus/integrations/prometheus_remote_write.md
index b9ce730ea..6b073d511 100644
--- a/exporting/prometheus/integrations/prometheus_remote_write.md
+++ b/src/exporting/prometheus/integrations/prometheus_remote_write.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/prometheus_remote_write.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/prometheus_remote_write.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Prometheus Remote Write"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/quasardb.md b/src/exporting/prometheus/integrations/quasardb.md
index 48d2419e0..4682f0800 100644
--- a/exporting/prometheus/integrations/quasardb.md
+++ b/src/exporting/prometheus/integrations/quasardb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/quasardb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/quasardb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "QuasarDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/splunk_signalfx.md b/src/exporting/prometheus/integrations/splunk_signalfx.md
index 324101b20..792808817 100644
--- a/exporting/prometheus/integrations/splunk_signalfx.md
+++ b/src/exporting/prometheus/integrations/splunk_signalfx.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/splunk_signalfx.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/splunk_signalfx.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Splunk SignalFx"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/thanos.md b/src/exporting/prometheus/integrations/thanos.md
index 77fe11595..de61e29a6 100644
--- a/exporting/prometheus/integrations/thanos.md
+++ b/src/exporting/prometheus/integrations/thanos.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/thanos.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/thanos.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Thanos"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/tikv.md b/src/exporting/prometheus/integrations/tikv.md
index 656ee695b..74a62938c 100644
--- a/exporting/prometheus/integrations/tikv.md
+++ b/src/exporting/prometheus/integrations/tikv.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/tikv.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/tikv.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "TiKV"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/timescaledb.md b/src/exporting/prometheus/integrations/timescaledb.md
index 681a0a618..56a8fd49b 100644
--- a/exporting/prometheus/integrations/timescaledb.md
+++ b/src/exporting/prometheus/integrations/timescaledb.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/timescaledb.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/timescaledb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "TimescaleDB"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/victoriametrics.md b/src/exporting/prometheus/integrations/victoriametrics.md
index 114aefc83..c2667ea73 100644
--- a/exporting/prometheus/integrations/victoriametrics.md
+++ b/src/exporting/prometheus/integrations/victoriametrics.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/victoriametrics.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/victoriametrics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "VictoriaMetrics"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/vmware_aria.md b/src/exporting/prometheus/integrations/vmware_aria.md
index 493d3550c..6015c398e 100644
--- a/exporting/prometheus/integrations/vmware_aria.md
+++ b/src/exporting/prometheus/integrations/vmware_aria.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/vmware_aria.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/vmware_aria.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "VMware Aria"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/exporting/prometheus/integrations/wavefront.md b/src/exporting/prometheus/integrations/wavefront.md
index a6bab0566..1803d30a6 100644
--- a/exporting/prometheus/integrations/wavefront.md
+++ b/src/exporting/prometheus/integrations/wavefront.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/integrations/wavefront.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/prometheus/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/integrations/wavefront.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/prometheus/metadata.yaml"
sidebar_label: "Wavefront"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -43,7 +43,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -53,7 +53,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -67,7 +67,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 20000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/prometheus/metadata.yaml b/src/exporting/prometheus/metadata.yaml
new file mode 100644
index 000000000..76f4b6d5a
--- /dev/null
+++ b/src/exporting/prometheus/metadata.yaml
@@ -0,0 +1,460 @@
+# yamllint disable rule:line-length
+---
+- &promexport
+ id: 'export-prometheus-remote'
+ meta: &meta
+ name: 'Prometheus Remote Write'
+ link: 'https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage'
+ categories:
+ - export
+ icon_filename: 'prometheus.svg'
+ keywords:
+ - exporter
+ - Prometheus
+ - remote write
+ - time series
+ overview:
+ exporter_description: |
+ Use the Prometheus remote write exporting connector to archive your Netdata metrics to the external storage provider of your choice for long-term storage and further analysis.
+ exporter_limitations: 'The remote write exporting connector does not support buffer on failures.'
+ setup:
+ prerequisites:
+ list:
+ - title: ''
+ description: |
+ - Netdata and the external storage provider of your choice, installed, configured and operational.
+ - `protobuf` and `snappy` libraries installed.
+          - Netdata reinstalled after the libraries are installed.
+ configuration:
+ file:
+ name: 'exporting.conf'
+ options:
+ description: 'The following options can be defined for this exporter.'
+ folding:
+ title: 'Config options'
+ enabled: true
+ list:
+ - name: 'enabled'
+ default_value: 'no'
+ description: 'Enables or disables an exporting connector instance (yes|no).'
+ required: true
+ - name: 'destination'
+ default_value: 'no'
+ description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+ required: true
+ detailed_description: |
+        The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+        - PROTOCOL can be udp or tcp. tcp is the default and the only protocol supported by the current exporting engine.
+        - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.
+        - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+ Example IPv4:
+ ```yaml
+ destination = 10.11.14.2:2003 10.11.14.3:4242 10.11.14.4:2003
+ ```
+ Example IPv6 and IPv4 together:
+ ```yaml
+ destination = [ffff:...:0001]:2003 10.11.12.1:2003
+ ```
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
+ - name: 'username'
+ default_value: 'my_username'
+ description: 'Username for HTTP authentication'
+ required: false
+ - name: 'password'
+ default_value: 'my_password'
+ description: 'Password for HTTP authentication'
+ required: false
+ - name: 'data source'
+ default_value: ''
+ description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
+ required: false
+ - name: 'hostname'
+ default_value: '[global].hostname'
+ description: 'The hostname to be used for sending data to the external database server.'
+ required: false
+ - name: 'prefix'
+ default_value: 'netdata'
+ description: 'The prefix to add to all metrics.'
+ required: false
+ - name: 'update every'
+ default_value: '10'
+ description: |
+        Frequency of sending data to the external database, in seconds.
+ required: false
+ detailed_description: |
+ Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+        send data to the same database. This randomness does not affect the quality of the data, only the time at which they are sent.
+ - name: 'buffer on failures'
+ default_value: '10'
+ description: |
+ The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
+ required: false
+ detailed_description: |
+ If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+ - name: 'timeout ms'
+ default_value: '20000'
+ description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+ required: false
+ - name: 'send hosts matching'
+ default_value: 'localhost *'
+ description: |
+ Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+ required: false
+ detailed_description: |
+ Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+ The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+ filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+ A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+ use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
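+
+          For example, to send all hosts named `*db*` except those containing `*child*` (illustrative):
+
+          ```
+          send hosts matching = !*child* *db*
+          ```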
+ - name: 'send charts matching'
+ default_value: '*'
+ description: |
+ One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
+ required: false
+ detailed_description: |
+ A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+ use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+ positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+ has a higher priority than the configuration option.
+ - name: 'send names instead of ids'
+ default_value: ''
+ description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+ required: false
+ detailed_description: |
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+ are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+          different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+ - name: 'send configured labels'
+ default_value: ''
+ description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+ required: false
+ - name: 'send automatic labels'
+ default_value: ''
+ description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list:
+ - name: 'Example configuration'
+ folding:
+ enabled: false
+ description: 'Basic example configuration for Prometheus remote write.'
+ config: |
+ [prometheus_remote_write:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ - name: 'Example configuration with HTTPS and HTTP authentication'
+ folding:
+ enabled: false
+          description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `prometheus_remote_write:https:my_instance`.'
+ config: |
+ [prometheus_remote_write:https:my_instance]
+ enabled = yes
+ destination = 10.11.14.2:2003
+ remote write URL path = /receive
+ username = my_username
+ password = my_password
+- <<: *promexport
+ id: 'export-appoptics'
+ meta:
+ <<: *meta
+ name: AppOptics
+ link: https://www.solarwinds.com/appoptics
+ icon_filename: 'solarwinds.svg'
+ keywords:
+ - app optics
+ - AppOptics
+ - Solarwinds
+- <<: *promexport
+ id: 'export-azure-data'
+ meta:
+ <<: *meta
+ name: Azure Data Explorer
+ link: https://azure.microsoft.com/en-us/pricing/details/data-explorer/
+ icon_filename: 'azuredataex.jpg'
+ keywords:
+ - Azure Data Explorer
+ - Azure
+- <<: *promexport
+ id: 'export-azure-event'
+ meta:
+ <<: *meta
+ name: Azure Event Hub
+ link: https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-about
+ icon_filename: 'azureeventhub.png'
+ keywords:
+ - Azure Event Hub
+ - Azure
+- <<: *promexport
+ id: 'export-newrelic'
+ meta:
+ <<: *meta
+ name: New Relic
+ link: https://newrelic.com/
+ icon_filename: 'newrelic.svg'
+ keywords:
+ - export
+ - NewRelic
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-quasar'
+ meta:
+ <<: *meta
+ name: QuasarDB
+ link: https://doc.quasar.ai/master/
+ icon_filename: 'quasar.jpeg'
+ keywords:
+ - export
+ - quasar
+ - quasarDB
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-splunk'
+ meta:
+ <<: *meta
+ name: Splunk SignalFx
+ link: https://www.splunk.com/en_us/products/observability.html
+ icon_filename: 'splunk.svg'
+ keywords:
+ - export
+ - splunk
+ - signalfx
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-tikv'
+ meta:
+ <<: *meta
+ name: TiKV
+ link: https://tikv.org/
+ icon_filename: 'tikv.png'
+ keywords:
+ - export
+ - TiKV
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-thanos'
+ meta:
+ <<: *meta
+ name: Thanos
+ link: https://thanos.io/
+ icon_filename: 'thanos.png'
+ keywords:
+ - export
+ - thanos
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-victoria'
+ meta:
+ <<: *meta
+ name: VictoriaMetrics
+ link: https://victoriametrics.com/products/open-source/
+ icon_filename: 'victoriametrics.png'
+ keywords:
+ - export
+ - victoriametrics
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-vmware'
+ meta:
+ <<: *meta
+ name: VMware Aria
+ link: https://www.vmware.com/products/aria-operations-for-applications.html
+ icon_filename: 'aria.png'
+ keywords:
+ - export
+ - VMware
+ - Aria
+ - Tanzu
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-chronix'
+ meta:
+ <<: *meta
+ name: Chronix
+ link: https://dbdb.io/db/chronix
+ icon_filename: 'chronix.png'
+ keywords:
+ - export
+ - chronix
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-cortex'
+ meta:
+ <<: *meta
+ name: Cortex
+ link: https://cortexmetrics.io/
+ icon_filename: 'cortex.png'
+ keywords:
+ - export
+ - cortex
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-crate'
+ meta:
+ <<: *meta
+ name: CrateDB
+ link: https://crate.io/
+ icon_filename: 'crate.svg'
+ keywords:
+ - export
+ - CrateDB
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-elastic'
+ meta:
+ <<: *meta
+ name: ElasticSearch
+ link: https://www.elastic.co/
+ icon_filename: 'elasticsearch.svg'
+ keywords:
+ - export
+ - ElasticSearch
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-opensearch'
+ meta:
+ <<: *meta
+    name: OpenSearch
+ link: https://opensearch.org/
+ icon_filename: 'opensearch.svg'
+ keywords:
+ - export
+ - OpenSearch
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-gnocchi'
+ meta:
+ <<: *meta
+ name: Gnocchi
+ link: https://wiki.openstack.org/wiki/Gnocchi
+ icon_filename: 'gnocchi.svg'
+ keywords:
+ - export
+ - Gnocchi
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-bigquery'
+ meta:
+ <<: *meta
+ name: Google BigQuery
+ link: https://cloud.google.com/bigquery/
+ icon_filename: 'bigquery.png'
+ keywords:
+ - export
+ - Google BigQuery
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-irondb'
+ meta:
+ <<: *meta
+ name: IRONdb
+ link: https://docs.circonus.com/irondb/
+ icon_filename: 'irondb.png'
+ keywords:
+ - export
+ - IRONdb
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-kafka'
+ meta:
+ <<: *meta
+ name: Kafka
+ link: https://kafka.apache.org/
+ icon_filename: 'kafka.svg'
+ keywords:
+ - export
+ - Kafka
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-m3db'
+ meta:
+ <<: *meta
+ name: M3DB
+ link: https://m3db.io/
+ icon_filename: 'm3db.png'
+ keywords:
+ - export
+ - M3DB
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-metricfire'
+ meta:
+ <<: *meta
+ name: MetricFire
+ link: https://www.metricfire.com/
+ icon_filename: 'metricfire.png'
+ keywords:
+ - export
+ - MetricFire
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-pgsql'
+ meta:
+ <<: *meta
+ name: PostgreSQL
+ link: https://www.postgresql.org/
+ icon_filename: 'postgres.svg'
+ keywords:
+ - export
+ - PostgreSQL
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-wavefront'
+ meta:
+ <<: *meta
+ name: Wavefront
+ link: https://docs.wavefront.com/wavefront_data_ingestion.html
+ icon_filename: 'wavefront.png'
+ keywords:
+ - export
+ - Wavefront
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-timescaledb'
+ meta:
+ <<: *meta
+ name: TimescaleDB
+ link: https://www.timescale.com/
+ icon_filename: 'timescale.png'
+ keywords:
+ - export
+ - TimescaleDB
+ - prometheus
+ - remote write
+- <<: *promexport
+ id: 'export-greptimedb'
+ meta:
+ <<: *meta
+ name: GreptimeDB
+ link: https://greptime.com/product/db
+ icon_filename: 'greptimedb.png'
+ keywords:
+ - export
+ - GreptimeDB
+ - prometheus
+ - remote write
diff --git a/src/exporting/prometheus/prometheus.c b/src/exporting/prometheus/prometheus.c
new file mode 100644
index 000000000..0ba83a939
--- /dev/null
+++ b/src/exporting/prometheus/prometheus.c
@@ -0,0 +1,1020 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "prometheus.h"
+
+// ----------------------------------------------------------------------------
+// PROMETHEUS
+// /api/v1/allmetrics?format=prometheus and /api/v1/allmetrics?format=prometheus_all_hosts
+
+static int is_matches_rrdset(struct instance *instance, RRDSET *st, SIMPLE_PATTERN *filter) {
+ if (instance->config.options & EXPORTING_OPTION_SEND_NAMES) {
+ return simple_pattern_matches_string(filter, st->name);
+ }
+ return simple_pattern_matches_string(filter, st->id);
+}
+
+/**
+ * Check if a chart can be sent to Prometheus
+ *
+ * @param instance an instance data structure.
+ * @param st a chart.
+ * @param filter a simple pattern to match against.
+ * @return Returns 1 if the chart can be sent, 0 otherwise.
+ */
+inline int can_send_rrdset(struct instance *instance, RRDSET *st, SIMPLE_PATTERN *filter)
+{
+#ifdef NETDATA_INTERNAL_CHECKS
+ RRDHOST *host = st->rrdhost;
+#endif
+
+ if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_EXPORTING_IGNORE)))
+ return 0;
+
+ if (filter) {
+ if (!is_matches_rrdset(instance, st, filter)) {
+ return 0;
+ }
+ } else if (unlikely(!rrdset_flag_check(st, RRDSET_FLAG_EXPORTING_SEND))) {
+ // we have not checked this chart
+ if (is_matches_rrdset(instance, st, instance->config.charts_pattern)) {
+ rrdset_flag_set(st, RRDSET_FLAG_EXPORTING_SEND);
+ } else {
+ rrdset_flag_set(st, RRDSET_FLAG_EXPORTING_IGNORE);
+ netdata_log_debug(
+ D_EXPORTING,
+ "EXPORTING: not sending chart '%s' of host '%s', because it is disabled for exporting.",
+ rrdset_id(st),
+ rrdhost_hostname(host));
+ return 0;
+ }
+ }
+
+ if (unlikely(!rrdset_is_available_for_exporting_and_alarms(st))) {
+ netdata_log_debug(
+ D_EXPORTING,
+ "EXPORTING: not sending chart '%s' of host '%s', because it is not available for exporting.",
+ rrdset_id(st),
+ rrdhost_hostname(host));
+ return 0;
+ }
+
+ if (unlikely(
+ st->rrd_memory_mode == RRD_MEMORY_MODE_NONE &&
+ !(EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED))) {
+ netdata_log_debug(
+ D_EXPORTING,
+ "EXPORTING: not sending chart '%s' of host '%s' because its memory mode is '%s' and the exporting connector requires database access.",
+ rrdset_id(st),
+ rrdhost_hostname(host),
+ rrd_memory_mode_name(host->rrd_memory_mode));
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct prometheus_server {
+ const char *server;
+ uint32_t hash;
+ RRDHOST *host;
+ time_t last_access;
+ struct prometheus_server *next;
+} *prometheus_server_root = NULL;
+
+static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
+
+/**
+ * Clean server root local structure
+ */
+void prometheus_clean_server_root()
+{
+ if (prometheus_server_root) {
+ netdata_mutex_lock(&prometheus_server_root_mutex);
+
+ struct prometheus_server *ps;
+ for (ps = prometheus_server_root; ps; ) {
+ struct prometheus_server *current = ps;
+ ps = ps->next;
+ if(current->server)
+ freez((void *)current->server);
+
+ freez(current);
+ }
+ prometheus_server_root = NULL;
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ }
+}
+
+/**
+ * Get the last time when a Prometheus server scraped the Netdata Prometheus exporter.
+ *
+ * @param server the name of the Prometheus server.
+ * @param host a data collecting host.
+ * @param now actual time.
+ * @return Returns the last time when the server accessed Netdata, or 0 if it is the first occurrence.
+ */
+static inline time_t prometheus_server_last_access(const char *server, RRDHOST *host, time_t now)
+{
+#ifdef UNIT_TESTING
+ return 0;
+#endif
+ uint32_t hash = simple_hash(server);
+
+ netdata_mutex_lock(&prometheus_server_root_mutex);
+
+ struct prometheus_server *ps;
+ for (ps = prometheus_server_root; ps; ps = ps->next) {
+ if (host == ps->host && hash == ps->hash && !strcmp(server, ps->server)) {
+ time_t last = ps->last_access;
+ ps->last_access = now;
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return last;
+ }
+ }
+
+ ps = callocz(1, sizeof(struct prometheus_server));
+ ps->server = strdupz(server);
+ ps->hash = hash;
+ ps->host = host;
+ ps->last_access = now;
+ ps->next = prometheus_server_root;
+ prometheus_server_root = ps;
+
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
+ return 0;
+}
+
+/**
+ * Copy and sanitize name.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
+inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
+{
+ size_t n;
+
+ for (n = 0; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (!isalnum(c))
+ *d = '_';
+ else
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
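+
+// Illustrative example: prometheus_name_copy() turns "disk.io" into "disk_io",
+// since every character that is not alphanumeric is replaced with '_'.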
+
+/**
+ * Copy and sanitize label.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @return Returns the length of the copied string.
+ */
+inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
+{
+ size_t n;
+
+ // make sure we can escape one character without overflowing the buffer
+ usable--;
+
+ for (n = 0; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (unlikely(c == '"' || c == '\\' || c == '\n')) {
+ *d++ = '\\';
+ n++;
+ }
+ *d = c;
+ }
+ *d = '\0';
+
+ return n;
+}
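+
+// Note: '"', '\' and newline characters in label values are backslash-escaped,
+// which is why one extra byte is reserved above by decrementing `usable`.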
+
+/**
+ * Copy and sanitize units.
+ *
+ * @param d a destination string.
+ * @param s a source string.
+ * @param usable the number of characters to copy.
+ * @param showoldunits set this flag to 1 to show old (before v1.12) units.
+ * @return Returns the destination string.
+ */
+inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits)
+{
+ const char *sorig = s;
+ char *ret = d;
+ size_t n;
+
+ // Fix for issue 5227
+ if (unlikely(showoldunits)) {
+ static struct {
+ const char *newunit;
+ uint32_t hash;
+ const char *oldunit;
+ } units[] = { { "KiB/s", 0, "kilobytes/s" },
+ { "MiB/s", 0, "MB/s" },
+ { "GiB/s", 0, "GB/s" },
+ { "KiB", 0, "KB" },
+ { "MiB", 0, "MB" },
+ { "GiB", 0, "GB" },
+ { "inodes", 0, "Inodes" },
+ { "percentage", 0, "percent" },
+ { "faults/s", 0, "page faults/s" },
+ { "KiB/operation", 0, "kilobytes per operation" },
+ { "milliseconds/operation", 0, "ms per operation" },
+ { NULL, 0, NULL } };
+ static int initialized = 0;
+ int i;
+
+ if (unlikely(!initialized)) {
+ for (i = 0; units[i].newunit; i++)
+ units[i].hash = simple_hash(units[i].newunit);
+ initialized = 1;
+ }
+
+ uint32_t hash = simple_hash(s);
+ for (i = 0; units[i].newunit; i++) {
+ if (unlikely(hash == units[i].hash && !strcmp(s, units[i].newunit))) {
+ // netdata_log_info("matched extension for filename '%s': '%s'", filename, last_dot);
+ s = units[i].oldunit;
+ sorig = s;
+ break;
+ }
+ }
+ }
+ *d++ = '_';
+ for (n = 1; *s && n < usable; d++, s++, n++) {
+ register char c = *s;
+
+ if (!isalnum(c))
+ *d = '_';
+ else
+ *d = c;
+ }
+
+ if (n == 2 && sorig[0] == '%') {
+ n = 0;
+ d = ret;
+ s = "_percent";
+ for (; *s && n < usable; n++)
+ *d++ = *s++;
+ } else if (n > 3 && sorig[n - 3] == '/' && sorig[n - 2] == 's') {
+ n = n - 2;
+ d -= 2;
+ s = "_persec";
+ for (; *s && n < usable; n++)
+ *d++ = *s++;
+ }
+
+ *d = '\0';
+
+ return ret;
+}
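+
+// Illustrative examples: "%" becomes "_percent" and "KiB/s" becomes "_KiB_persec";
+// any other unit is prefixed with '_' and its non-alphanumeric characters become '_'.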
+
+/**
+ * Format host labels for the Prometheus exporter
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ */
+
+struct format_prometheus_label_callback {
+ struct instance *instance;
+ size_t count;
+};
+
+static int format_prometheus_label_callback(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) {
+ struct format_prometheus_label_callback *d = (struct format_prometheus_label_callback *)data;
+
+ if (!should_send_label(d->instance, ls)) return 0;
+
+ char k[PROMETHEUS_ELEMENT_MAX + 1];
+ char v[PROMETHEUS_ELEMENT_MAX + 1];
+
+ prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX);
+
+ if (*k && *v) {
+ if (d->count > 0) buffer_strcat(d->instance->labels_buffer, ",");
+ buffer_sprintf(d->instance->labels_buffer, "%s=\"%s\"", k, v);
+ d->count++;
+ }
+ return 1;
+}
+
+void format_host_labels_prometheus(struct instance *instance, RRDHOST *host)
+{
+ if (unlikely(!sending_labels_configured(instance)))
+ return;
+
+ if (!instance->labels_buffer)
+ instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
+
+ struct format_prometheus_label_callback tmp = {
+ .instance = instance,
+ .count = 0
+ };
+ rrdlabels_walkthrough_read(host->rrdlabels, format_prometheus_label_callback, &tmp);
+}
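+
+// The labels_buffer now holds a comma-separated list such as key1="v1",key2="v2"
+// (illustrative), which is later appended to the netdata_info line.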
+
+/**
+ * Format chart labels for the Prometheus exporter.
+ * Labels whose names start with '_' are skipped.
+ *
+ * @param data the buffer the formatted labels are appended to.
+ */
+
+static int format_prometheus_chart_label_callback(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) {
+ BUFFER *wb = data;
+
+ if (name[0] == '_' )
+ return 1;
+
+ char k[PROMETHEUS_ELEMENT_MAX + 1];
+ char v[PROMETHEUS_ELEMENT_MAX + 1];
+
+ prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX);
+
+ if (*k && *v)
+ buffer_sprintf(wb, ",%s=\"%s\"", k, v);
+
+ return 1;
+}
+
+struct host_variables_callback_options {
+ RRDHOST *host;
+ BUFFER *wb;
+ BUFFER *plabels_buffer;
+ EXPORTING_OPTIONS exporting_options;
+ PROMETHEUS_OUTPUT_OPTIONS output_options;
+ const char *prefix;
+ const char *labels;
+ time_t now;
+ int host_header_printed;
+ char name[PROMETHEUS_VARIABLE_MAX + 1];
+ SIMPLE_PATTERN *pattern;
+ struct instance *instance;
+ STRING *prometheus;
+};
+
+/**
+ * Print host variables.
+ *
+ * @param rv a variable.
+ * @param data callback options.
+ * @return Returns 1 if the variable was written, 0 otherwise.
+ */
+static int print_host_variables_callback(const DICTIONARY_ITEM *item __maybe_unused, void *rv_ptr __maybe_unused, void *data) {
+ const RRDVAR_ACQUIRED *rv = (const RRDVAR_ACQUIRED *)item;
+
+ struct host_variables_callback_options *opts = data;
+
+ if (!opts->host_header_printed) {
+ opts->host_header_printed = 1;
+ }
+
+ NETDATA_DOUBLE value = rrdvar2number(rv);
+ if (isnan(value) || isinf(value)) {
+ return 0;
+ }
+
+ char *label_pre = "";
+ char *label_post = "";
+ if (opts->labels && *opts->labels) {
+ label_pre = "{";
+ label_post = "}";
+ }
+
+ prometheus_name_copy(opts->name, rrdvar_name(rv), sizeof(opts->name));
+
+ if (opts->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " NETDATA_DOUBLE_FORMAT " %llu\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ (opts->labels[0] == ',') ? &opts->labels[1] : opts->labels,
+ label_post,
+ value,
+ opts->now * 1000ULL);
+ else
+ buffer_sprintf(
+ opts->wb,
+ "%s_%s%s%s%s " NETDATA_DOUBLE_FORMAT "\n",
+ opts->prefix,
+ opts->name,
+ label_pre,
+ (opts->labels[0] == ',') ? &opts->labels[1] : opts->labels,
+ label_post,
+ value);
+
+ return 1;
+}
+
+struct gen_parameters {
+ const char *prefix;
+ const char *labels_prefix;
+ char *context;
+ char *suffix;
+
+ char *chart;
+ char *dimension;
+ char *family;
+ char *labels;
+
+ PROMETHEUS_OUTPUT_OPTIONS output_options;
+ RRDSET *st;
+ RRDDIM *rd;
+
+ const char *relation;
+ const char *type;
+};
+
+/**
+ * Write an as-collected help comment to a buffer.
+ *
+ * @param wb the buffer to write the comment to.
+ * @param prefix the metric name prefix.
+ * @param context context name we are using.
+ * @param units the sanitized units string.
+ * @param suffix the metric name suffix.
+ * @param st the chart whose title is used as the help text.
+ */
+static inline void generate_as_collected_prom_help(BUFFER *wb,
+ const char *prefix,
+ char *context,
+ char *units,
+ char *suffix,
+ RRDSET *st)
+{
+ buffer_sprintf(wb, "# HELP %s_%s%s%s %s\n", prefix, context, units, suffix, rrdset_title(st));
+}
+
+/**
+ * Write an as-collected type comment to a buffer.
+ *
+ * @param wb the buffer to write the comment to.
+ * @param prefix the metric name prefix.
+ * @param context context name we are using.
+ * @param units the sanitized units string.
+ * @param suffix the metric name suffix.
+ * @param type the Prometheus metric type (gauge or counter).
+ */
+static inline void generate_as_collected_prom_type(BUFFER *wb,
+ const char *prefix,
+ char *context,
+ char *units,
+ char *suffix,
+ const char *type)
+{
+ buffer_sprintf(wb, "# TYPE %s_%s%s%s %s\n", prefix, context, units, suffix, type);
+}
+
+/**
+ * Write an as-collected metric to a buffer.
+ *
+ * @param wb the buffer to write the metric to.
+ * @param p parameters for generating the metric string.
+ * @param homogeneous a flag for homogeneous charts.
+ * @param prometheus_collector a flag for metrics from prometheus collector.
+ * @param chart_labels the dictionary with chart labels
+ */
+static void generate_as_collected_from_metric(BUFFER *wb,
+ struct gen_parameters *p,
+ int homogeneous,
+ int prometheus_collector,
+ RRDLABELS *chart_labels)
+{
+ buffer_sprintf(wb, "%s_%s", p->prefix, p->context);
+
+ if (!homogeneous)
+ buffer_sprintf(wb, "_%s", p->dimension);
+
+ buffer_sprintf(wb, "%s{%schart=\"%s\"", p->suffix, p->labels_prefix, p->chart);
+
+ if (homogeneous)
+ buffer_sprintf(wb, ",%sdimension=\"%s\"", p->labels_prefix, p->dimension);
+
+ buffer_sprintf(wb, ",%sfamily=\"%s\"", p->labels_prefix, p->family);
+
+ rrdlabels_walkthrough_read(chart_labels, format_prometheus_chart_label_callback, wb);
+
+ buffer_sprintf(wb, "%s} ", p->labels);
+
+ if (prometheus_collector)
+ buffer_sprintf(
+ wb,
+ NETDATA_DOUBLE_FORMAT,
+ (NETDATA_DOUBLE)p->rd->collector.last_collected_value * (NETDATA_DOUBLE)p->rd->multiplier /
+ (NETDATA_DOUBLE)p->rd->divisor);
+ else
+ buffer_sprintf(wb, COLLECTED_NUMBER_FORMAT, p->rd->collector.last_collected_value);
+
+ if (p->output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, " %"PRIu64"\n", timeval_msec(&p->rd->collector.last_collected_time));
+ else
+ buffer_sprintf(wb, "\n");
+}
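+
+// Illustrative output line produced above (names and values are examples only):
+//   netdata_system_cpu_total{chart="system.cpu",dimension="user",family="cpu"} 25 1612345678000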
+
+static void prometheus_print_os_info(
+ BUFFER *wb,
+ RRDHOST *host,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ FILE *fp;
+ char filename[FILENAME_MAX + 1];
+ char buf[BUFSIZ + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/etc/os-release");
+ fp = fopen(filename, "r");
+ if (!fp) {
+ /* Fallback to lsb-release */
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/etc/lsb-release");
+ fp = fopen(filename, "r");
+ }
+ if (!fp) {
+ return;
+ }
+
+ buffer_sprintf(wb, "netdata_os_info{instance=\"%s\"", rrdhost_hostname(host));
+
+ while (fgets(buf, BUFSIZ, fp)) {
+ char *in, *sanitized;
+ char *key, *val;
+ int in_val_part = 0;
+
+ /* sanitize the line */
+ sanitized = in = buf;
+ in_val_part = 0;
+ while (*in && *in != '\n') {
+ if (!in_val_part) {
+ /* Only accepts alphabetic characters and '_'
+ * in key part */
+ if (isalpha((uint8_t)*in) || *in == '_') {
+ *(sanitized++) = tolower((uint8_t)*in);
+ } else if (*in == '=') {
+ in_val_part = 1;
+ *(sanitized++) = '=';
+ }
+ } else {
+ /* Don't accept special characters in
+ * value part */
+ switch (*in) {
+ case '"':
+ case '\'':
+ case '\r':
+ case '\t':
+ break;
+ default:
+ if (isprint((uint8_t)*in)) {
+ *(sanitized++) = *in;
+ }
+ }
+ }
+ in++;
+ }
+ /* Terminate the string */
+ *(sanitized++) = '\0';
+
+ /* Split key/val */
+ key = buf;
+ val = strchr(buf, '=');
+
+ /* If we have a key/value pair, add it as a label */
+ if (val) {
+ *val = '\0';
+ val++;
+ buffer_sprintf(wb, ",%s=\"%s\"", key, val);
+ }
+ }
+
+ /* Finish the line */
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, "} 1 %llu\n", now_realtime_usec() / USEC_PER_MS);
+ else
+ buffer_sprintf(wb, "} 1\n");
+
+ fclose(fp);
+}
+
+/**
+ * RRDSET to JSON
+ *
+ * From an RRDSET, extract the content necessary to write the exporter output.
+ *
+ * @param st netdata chart structure
+ * @param data structure with the necessary data, used to build the expected result.
+ *
+ * @return It returns 1 when content was used and 0 otherwise.
+ */
+static int prometheus_rrdset_to_json(RRDSET *st, void *data)
+{
+ struct host_variables_callback_options *opts = data;
+
+ if (likely(can_send_rrdset(opts->instance, st, opts->pattern))) {
+ PROMETHEUS_OUTPUT_OPTIONS output_options = opts->output_options;
+ BUFFER *wb = opts->wb;
+ const char *prefix = opts->prefix;
+
+ BUFFER *plabels_buffer = opts->plabels_buffer;
+ const char *plabels_prefix = opts->instance->config.label_prefix;
+
+ STRING *prometheus = opts->prometheus;
+
+ char chart[PROMETHEUS_ELEMENT_MAX + 1];
+ char context[PROMETHEUS_ELEMENT_MAX + 1];
+ char family[PROMETHEUS_ELEMENT_MAX + 1];
+ char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+ prometheus_label_copy(chart,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ?
+ rrdset_name(st) : rrdset_id(st), PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(family, rrdset_family(st), PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(context, rrdset_context(st), PROMETHEUS_ELEMENT_MAX);
+
+ int as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(opts->exporting_options)
+ == EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ int homogeneous = 1;
+ int prometheus_collector = 0;
+ RRDSET_FLAGS flags = rrdset_flag_get(st);
+ if (as_collected) {
+ if (flags & RRDSET_FLAG_HOMOGENEOUS_CHECK)
+ rrdset_update_heterogeneous_flag(st);
+
+ if (flags & RRDSET_FLAG_HETEROGENEOUS)
+ homogeneous = 0;
+
+ if (st->module_name == prometheus)
+ prometheus_collector = 1;
+ }
+ else {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(opts->exporting_options) == EXPORTING_SOURCE_DATA_AVERAGE &&
+ !(output_options & PROMETHEUS_OUTPUT_HIDEUNITS))
+ prometheus_units_copy(units,
+ rrdset_units(st),
+ PROMETHEUS_ELEMENT_MAX,
+ output_options & PROMETHEUS_OUTPUT_OLDUNITS);
+ }
+
+ // for each dimension
+ RRDDIM *rd;
+ rrddim_foreach_read(rd, st) {
+
+ if (rd->collector.counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+
+ struct gen_parameters p;
+ p.prefix = prefix;
+ p.labels_prefix = plabels_prefix;
+ p.context = context;
+ p.suffix = suffix;
+ p.chart = chart;
+ p.dimension = dimension;
+ p.family = family;
+ p.labels = (char *)opts->labels;
+ p.output_options = output_options;
+ p.st = st;
+ p.rd = rd;
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ if (unlikely(rd->collector.last_collected_time.tv_sec < opts->instance->after))
+ continue;
+
+ p.type = "gauge";
+ p.relation = "gives";
+ if (rd->algorithm == RRD_ALGORITHM_INCREMENTAL ||
+ rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) {
+ p.type = "counter";
+ p.relation = "delta gives";
+ if (!prometheus_collector)
+ p.suffix = "_total";
+ }
+
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP_TYPE) {
+ generate_as_collected_prom_help(wb, prefix, context, units, suffix, st);
+ generate_as_collected_prom_type(wb, prefix, context, units, p.suffix, p.type);
+ opts->output_options &= ~PROMETHEUS_OUTPUT_HELP_TYPE;
+ }
+
+ if (homogeneous) {
+                        // all the dimensions of the chart have the same algorithm, multiplier and divisor
+ // we add all dimensions as labels
+
+ prometheus_label_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
+ PROMETHEUS_ELEMENT_MAX);
+ }
+ else {
+                        // the dimensions of the chart do not have the same algorithm, multiplier or divisor
+ // we create a metric per dimension
+
+ prometheus_name_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
+ PROMETHEUS_ELEMENT_MAX);
+ }
+ generate_as_collected_from_metric(wb, &p, homogeneous, prometheus_collector, st->rrdlabels);
+ }
+ else {
+ // we need average or sum of the data
+
+ time_t last_time = opts->instance->before;
+ NETDATA_DOUBLE value = exporting_calculate_value_from_stored_data(opts->instance, rd, &last_time);
+
+ if (!isnan(value) && !isinf(value)) {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(opts->exporting_options)
+ == EXPORTING_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(opts->exporting_options)
+ == EXPORTING_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ prometheus_label_copy(
+ dimension,
+ (output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
+ PROMETHEUS_ELEMENT_MAX);
+
+ if (opts->output_options & PROMETHEUS_OUTPUT_HELP_TYPE) {
+ generate_as_collected_prom_help(wb, prefix, context, units, suffix, st);
+ generate_as_collected_prom_type(wb, prefix, context, units, p.suffix, "gauge");
+ opts->output_options &= ~PROMETHEUS_OUTPUT_HELP_TYPE;
+ }
+
+ buffer_flush(plabels_buffer);
+ buffer_sprintf(plabels_buffer,
+ "%1$schart=\"%2$s\",%1$sdimension=\"%3$s\",%1$sfamily=\"%4$s\"",
+ plabels_prefix,
+ chart,
+ dimension,
+ family);
+ rrdlabels_walkthrough_read(st->rrdlabels,
+ format_prometheus_chart_label_callback,
+ plabels_buffer);
+
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb,
+ "%s_%s%s%s{%s%s} " NETDATA_DOUBLE_FORMAT " %llu\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ buffer_tostring(plabels_buffer),
+ opts->labels,
+ value,
+ last_time * MSEC_PER_SEC);
+ else
+ buffer_sprintf(wb, "%s_%s%s%s{%s%s} " NETDATA_DOUBLE_FORMAT "\n",
+ prefix,
+ context,
+ units,
+ suffix,
+ buffer_tostring(plabels_buffer),
+ opts->labels,
+ value);
+ }
+ }
+ }
+ }
+ rrddim_foreach_done(rd);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * RRDCONTEXT callback
+ *
+ * Callback used to parse dictionary
+ *
+ * @param item the dictionary structure
+ * @param value unused element
+ * @param data structure used to store data.
+ *
+ * @return It always returns HTTP_RESP_OK
+ */
+static inline int prometheus_rrdcontext_callback(const DICTIONARY_ITEM *item, void *value, void *data)
+{
+ const char *context_name = dictionary_acquired_item_name(item);
+ struct host_variables_callback_options *opts = data;
+ (void)value;
+
+ opts->output_options |= PROMETHEUS_OUTPUT_HELP_TYPE;
+ (void)rrdcontext_foreach_instance_with_rrdset_in_context(opts->host, context_name, prometheus_rrdset_to_json, data);
+
+ return HTTP_RESP_OK;
+}
+
+/**
+ * Write metrics in Prometheus format to a buffer.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param filter_string a simple pattern filter.
+ * @param wb the buffer to fill with metrics.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param allhosts set to 1 if host instance should be in the output for tags.
+ * @param output_options options to configure the format of the output.
+ */
+static void rrd_stats_api_v1_charts_allmetrics_prometheus(
+ struct instance *instance,
+ RRDHOST *host,
+ const char *filter_string,
+ BUFFER *wb,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ int allhosts,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true);
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(hostname, rrdhost_hostname(host), PROMETHEUS_ELEMENT_MAX);
+
+ format_host_labels_prometheus(instance, host);
+
+ buffer_sprintf(
+ wb,
+ "netdata_info{instance=\"%s\",application=\"%s\",version=\"%s\"",
+ hostname,
+ rrdhost_program_name(host),
+ rrdhost_program_version(host));
+
+ if (instance->labels_buffer && *buffer_tostring(instance->labels_buffer)) {
+ buffer_sprintf(wb, ",%s", buffer_tostring(instance->labels_buffer));
+ }
+
+ if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
+ buffer_sprintf(wb, "} 1 %llu\n", now_realtime_usec() / USEC_PER_MS);
+ else
+ buffer_sprintf(wb, "} 1\n");
+
+ char labels[PROMETHEUS_LABELS_MAX + 1] = "";
+ if (allhosts) {
+ snprintfz(labels, PROMETHEUS_LABELS_MAX, ",%sinstance=\"%s\"", instance->config.label_prefix, hostname);
+ }
+
+ if (instance->labels_buffer)
+ buffer_flush(instance->labels_buffer);
+
+ if (instance->config.options & EXPORTING_OPTION_SEND_AUTOMATIC_LABELS)
+ prometheus_print_os_info(wb, host, output_options);
+
+
+ BUFFER *plabels_buffer = buffer_create(0, NULL);
+
+ struct host_variables_callback_options opts = {
+ .host = host,
+ .wb = wb,
+ .plabels_buffer = plabels_buffer,
+ .labels = labels, // FIX: very misleading name and poor implementation of adding the "instance" label
+ .exporting_options = exporting_options,
+ .output_options = output_options,
+ .prefix = prefix,
+ .now = now_realtime_sec(),
+ .host_header_printed = 0,
+ .pattern = filter,
+ .instance = instance,
+ .prometheus = string_strdupz("prometheus")
+ };
+
+ // send custom variables set for the host
+ if (output_options & PROMETHEUS_OUTPUT_VARIABLES) {
+ rrdvar_walkthrough_read(host->rrdvars, print_host_variables_callback, &opts);
+ }
+
+ // for each context
+ if (!host->rrdctx.contexts) {
+ netdata_log_error("%s(): request for host '%s' that does not have rrdcontexts initialized.", __FUNCTION__, rrdhost_hostname(host));
+ goto allmetrics_cleanup;
+ }
+
+ dictionary_walkthrough_read(host->rrdctx.contexts, prometheus_rrdcontext_callback, &opts);
+
+allmetrics_cleanup:
+ simple_pattern_free(filter);
+ buffer_free(plabels_buffer);
+ string_freez(opts.prometheus);
+}
+
+/**
+ * Get the last time when a server accessed Netdata.
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @param server the name of a Prometheus server.
+ * @param now actual time.
+ * @return Returns the last time when the server accessed Netdata.
+ */
+static inline time_t prometheus_preparation(
+ struct instance *instance,
+ RRDHOST *host,
+ const char *server,
+ time_t now)
+{
+#ifndef UNIT_TESTING
+ analytics_log_prometheus();
+#endif
+ if (!server || !*server)
+ server = "default";
+
+ time_t after = prometheus_server_last_access(server, host, now);
+
+ if (!after) {
+ after = now - instance->config.update_every;
+ }
+
+ if (after > now) {
+ // oops! this should never happen
+ after = now - instance->config.update_every;
+ }
+
+ return after;
+}
+
+/**
+ * Write metrics and auxiliary information for one host to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param filter_string a simple pattern filter.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
+void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
+ RRDHOST *host,
+ const char *filter_string,
+ BUFFER *wb,
+ const char *server,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (unlikely(!prometheus_exporter_instance || !prometheus_exporter_instance->config.initialized))
+ return;
+
+ prometheus_exporter_instance->before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ prometheus_exporter_instance->after = prometheus_preparation(
+ prometheus_exporter_instance,
+ host,
+ server,
+ prometheus_exporter_instance->before);
+
+ rrd_stats_api_v1_charts_allmetrics_prometheus(
+ prometheus_exporter_instance, host, filter_string, wb, prefix, exporting_options, 0, output_options);
+}
+
+/**
+ * Write metrics and auxiliary information for all hosts to a buffer.
+ *
+ * @param host a data collecting host.
+ * @param filter_string a simple pattern filter.
+ * @param wb the buffer to write to.
+ * @param server the name of a Prometheus server.
+ * @param prefix a prefix for every metric.
+ * @param exporting_options options to configure what data is exported.
+ * @param output_options options to configure the format of the output.
+ */
+void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
+ RRDHOST *host,
+ const char *filter_string,
+ BUFFER *wb,
+ const char *server,
+ const char *prefix,
+ EXPORTING_OPTIONS exporting_options,
+ PROMETHEUS_OUTPUT_OPTIONS output_options)
+{
+ if (unlikely(!prometheus_exporter_instance || !prometheus_exporter_instance->config.initialized))
+ return;
+
+ prometheus_exporter_instance->before = now_realtime_sec();
+
+ // we start at the point we had stopped before
+ prometheus_exporter_instance->after = prometheus_preparation(
+ prometheus_exporter_instance,
+ host,
+ server,
+ prometheus_exporter_instance->before);
+
+ dfe_start_reentrant(rrdhost_root_index, host)
+ {
+ rrd_stats_api_v1_charts_allmetrics_prometheus(
+ prometheus_exporter_instance, host, filter_string, wb, prefix, exporting_options, 1, output_options);
+ }
+ dfe_done(host);
+}
diff --git a/exporting/prometheus/prometheus.h b/src/exporting/prometheus/prometheus.h
index e80b682ae..0a537fd77 100644
--- a/exporting/prometheus/prometheus.h
+++ b/src/exporting/prometheus/prometheus.h
@@ -9,17 +9,14 @@
#define PROMETHEUS_LABELS_MAX 1024
#define PROMETHEUS_VARIABLE_MAX 256
-#define PROMETHEUS_LABELS_MAX_NUMBER 128
-
typedef enum prometheus_output_flags {
PROMETHEUS_OUTPUT_NONE = 0,
- PROMETHEUS_OUTPUT_HELP = (1 << 0),
- PROMETHEUS_OUTPUT_TYPES = (1 << 1),
+ PROMETHEUS_OUTPUT_HELP_TYPE = (1 << 1),
PROMETHEUS_OUTPUT_NAMES = (1 << 2),
PROMETHEUS_OUTPUT_TIMESTAMPS = (1 << 3),
PROMETHEUS_OUTPUT_VARIABLES = (1 << 4),
- PROMETHEUS_OUTPUT_OLDUNITS = (1 << 5),
- PROMETHEUS_OUTPUT_HIDEUNITS = (1 << 6)
+ PROMETHEUS_OUTPUT_OLDUNITS = (1 << 5),
+ PROMETHEUS_OUTPUT_HIDEUNITS = (1 << 6)
} PROMETHEUS_OUTPUT_OPTIONS;
void rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
diff --git a/exporting/prometheus/remote_write/README.md b/src/exporting/prometheus/remote_write/README.md
index 8ca4673a6..8ca4673a6 120000
--- a/exporting/prometheus/remote_write/README.md
+++ b/src/exporting/prometheus/remote_write/README.md
diff --git a/src/exporting/prometheus/remote_write/remote_write.c b/src/exporting/prometheus/remote_write/remote_write.c
new file mode 100644
index 000000000..b4b6f996b
--- /dev/null
+++ b/src/exporting/prometheus/remote_write/remote_write.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "remote_write.h"
+
+static int as_collected;
+static int homogeneous;
+char context[PROMETHEUS_ELEMENT_MAX + 1];
+char chart[PROMETHEUS_ELEMENT_MAX + 1];
+char family[PROMETHEUS_ELEMENT_MAX + 1];
+char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
+
+/**
+ * Prepare HTTP header
+ *
+ * @param instance an instance data structure.
+ */
+void prometheus_remote_write_prepare_header(struct instance *instance)
+{
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ instance->config.connector_specific_config;
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+
+ buffer_sprintf(
+ simple_connector_data->last_buffer->header,
+ "POST %s HTTP/1.1\r\n"
+ "Host: %s\r\n"
+ "Accept: */*\r\n"
+ "%s"
+ "Content-Encoding: snappy\r\n"
+ "Content-Type: application/x-protobuf\r\n"
+ "X-Prometheus-Remote-Write-Version: 0.1.0\r\n"
+ "Content-Length: %zu\r\n"
+ "\r\n",
+ connector_specific_config->remote_write_path,
+ simple_connector_data->connected_to,
+ simple_connector_data->auth_string ? simple_connector_data->auth_string : "",
+ buffer_strlen(simple_connector_data->last_buffer->buffer));
+}
+
+/**
+ * Process a response received after the Prometheus remote write connector has sent data
+ *
+ * @param buffer a response from a remote service.
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int process_prometheus_remote_write_response(BUFFER *buffer, struct instance *instance)
+{
+ if (unlikely(!buffer))
+ return 1;
+
+ const char *s = buffer_tostring(buffer);
+ int len = buffer_strlen(buffer);
+
+ // do nothing with HTTP responses 200 or 204
+
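+    // skip the first token of the status line (e.g. "HTTP/1.1") to reach the status code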
+ while (!isspace(*s) && len) {
+ s++;
+ len--;
+ }
+ s++;
+ len--;
+
+ if (likely(len > 4 && (!strncmp(s, "200 ", 4) || !strncmp(s, "204 ", 4))))
+ return 0;
+ else
+ return exporting_discard_response(buffer, instance);
+}
+
+/**
+ * Release connector specific data allocated for the instance.
+ *
+ * @param instance an instance data structure.
+ */
+void clean_prometheus_remote_write(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data = instance->connector_specific_data;
+ freez(simple_connector_data->connector_specific_data);
+
+ struct prometheus_remote_write_specific_config *connector_specific_config =
+ instance->config.connector_specific_config;
+ freez(connector_specific_config->remote_write_path);
+}
+
+/**
+ * Initialize Prometheus Remote Write connector instance
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int init_prometheus_remote_write_instance(struct instance *instance)
+{
+ instance->worker = simple_connector_worker;
+
+ instance->start_batch_formatting = NULL;
+ instance->start_host_formatting = format_host_prometheus_remote_write;
+ instance->start_chart_formatting = format_chart_prometheus_remote_write;
+ instance->metric_formatting = format_dimension_prometheus_remote_write;
+ instance->end_chart_formatting = NULL;
+ instance->variables_formatting = format_variables_prometheus_remote_write;
+ instance->end_host_formatting = NULL;
+ instance->end_batch_formatting = format_batch_prometheus_remote_write;
+
+ instance->prepare_header = prometheus_remote_write_prepare_header;
+ instance->check_response = process_prometheus_remote_write_response;
+
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
+
+ if (uv_mutex_init(&instance->mutex))
+ return 1;
+ if (uv_cond_init(&instance->cond_var))
+ return 1;
+
+ struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
+ instance->connector_specific_data = simple_connector_data;
+
+#ifdef ENABLE_HTTPS
+ simple_connector_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
+ if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
+ netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
+ }
+#endif
+
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ callocz(1, sizeof(struct prometheus_remote_write_specific_data));
+ simple_connector_data->connector_specific_data = (void *)connector_specific_data;
+
+ simple_connector_init(instance);
+
+ connector_specific_data->write_request = init_write_request();
+
+ instance->engine->protocol_buffers_initialized = 1;
+
+ return 0;
+}
+
+struct format_remote_write_label_callback {
+ struct instance *instance;
+ void *write_request;
+};
+
+static int format_remote_write_label_callback(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data)
+{
+ struct format_remote_write_label_callback *d = (struct format_remote_write_label_callback *)data;
+
+ if (!should_send_label(d->instance, ls)) return 0;
+ char k[PROMETHEUS_ELEMENT_MAX + 1];
+ char v[PROMETHEUS_ELEMENT_MAX + 1];
+
+ prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX);
+ add_label(d->write_request, k, v);
+ return 1;
+}
+
+/**
+ * Format host data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_host_prometheus_remote_write(struct instance *instance, RRDHOST *host)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ char hostname[PROMETHEUS_ELEMENT_MAX + 1];
+ prometheus_label_copy(
+ hostname,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
+ PROMETHEUS_ELEMENT_MAX);
+
+ add_host_info(
+ connector_specific_data->write_request,
+ "netdata_info", hostname, rrdhost_program_name(host), rrdhost_program_version(host), now_realtime_usec() / USEC_PER_MS);
+
+ if (unlikely(sending_labels_configured(instance))) {
+ struct format_remote_write_label_callback tmp = {
+ .write_request = connector_specific_data->write_request,
+ .instance = instance
+ };
+ rrdlabels_walkthrough_read(host->rrdlabels, format_remote_write_label_callback, &tmp);
+ }
+
+ return 0;
+}
+
+/**
+ * Format chart data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param st a chart.
+ * @return Always returns 0.
+ */
+int format_chart_prometheus_remote_write(struct instance *instance, RRDSET *st)
+{
+ prometheus_label_copy(
+ chart,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? rrdset_name(st) : rrdset_id(st),
+ PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(family, rrdset_family(st), PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(context, rrdset_context(st), PROMETHEUS_ELEMENT_MAX);
+
+ as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
+ homogeneous = 1;
+ if (as_collected) {
+ if (rrdset_flag_check(st, RRDSET_FLAG_HOMOGENEOUS_CHECK))
+ rrdset_update_heterogeneous_flag(st);
+
+ if (rrdset_flag_check(st, RRDSET_FLAG_HETEROGENEOUS))
+ homogeneous = 0;
+ } else {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ prometheus_units_copy(units, rrdset_units(st), PROMETHEUS_ELEMENT_MAX, 0);
+ }
+
+ return 0;
+}
+
+/**
+ * Format dimension data for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param rd a dimension.
+ * @return Always returns 0.
+ */
+int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *rd)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ if (rd->collector.counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
+ char name[PROMETHEUS_LABELS_MAX + 1];
+ char dimension[PROMETHEUS_ELEMENT_MAX + 1];
+ char *suffix = "";
+ RRDHOST *host = rd->rrdset->rrdhost;
+
+ if (as_collected) {
+ // we need as-collected / raw data
+
+ if (unlikely(rd->collector.last_collected_time.tv_sec < instance->after)) {
+ netdata_log_debug(
+ D_EXPORTING,
+ "EXPORTING: not sending dimension '%s' of chart '%s' from host '%s', "
+ "its last data collection (%lu) is not within our timeframe (%lu to %lu)",
+ rrddim_id(rd), rrdset_id(rd->rrdset),
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
+ (unsigned long)rd->collector.last_collected_time.tv_sec,
+ (unsigned long)instance->after,
+ (unsigned long)instance->before);
+ return 0;
+ }
+
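+            // incremental dimensions become Prometheus counters, which by convention carry a
+            // "_total" suffix; metrics originally collected from a Prometheus endpoint are
+            // assumed to follow that convention already, so the suffix is not added again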
+ if (rd->algorithm == RRD_ALGORITHM_INCREMENTAL || rd->algorithm == RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL) {
+ if (strcmp(rrdset_module_name(rd->rrdset), "prometheus"))
+ suffix = "_total";
+ }
+
+ if (homogeneous) {
+                // all the dimensions of the chart have the same algorithm, multiplier and divisor,
+                // so we add all dimensions as labels
+
+ prometheus_label_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, dimension,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
+ rd->collector.last_collected_value, timeval_msec(&rd->collector.last_collected_time));
+ } else {
+                // the dimensions of the chart do not have the same algorithm, multiplier or divisor,
+                // so we create a metric per dimension
+
+ prometheus_name_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(
+ name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", instance->config.prefix, context, dimension,
+ suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, NULL,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
+ rd->collector.last_collected_value, timeval_msec(&rd->collector.last_collected_time));
+ }
+ } else {
+ // we need average or sum of the data
+
+ time_t last_t = instance->before;
+ NETDATA_DOUBLE value = exporting_calculate_value_from_stored_data(instance, rd, &last_t);
+
+ if (!isnan(value) && !isinf(value)) {
+ if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AVERAGE)
+ suffix = "_average";
+ else if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_SUM)
+ suffix = "_sum";
+
+ prometheus_label_copy(
+ dimension,
+ (instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
+ PROMETHEUS_ELEMENT_MAX);
+ snprintf(
+ name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", instance->config.prefix, context, units, suffix);
+
+ add_metric(
+ connector_specific_data->write_request,
+ name, chart, family, dimension,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
+ value, last_t * MSEC_PER_SEC);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int format_variable_prometheus_remote_write_callback(const DICTIONARY_ITEM *item __maybe_unused, void *rv_ptr __maybe_unused, void *data) {
+ const RRDVAR_ACQUIRED *rv = (const RRDVAR_ACQUIRED *)item;
+
+ struct prometheus_remote_write_variables_callback_options *opts = data;
+
+ RRDHOST *host = opts->host;
+ struct instance *instance = opts->instance;
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ char name[PROMETHEUS_LABELS_MAX + 1];
+ char *suffix = "";
+
+ prometheus_name_copy(context, rrdvar_name(rv), PROMETHEUS_ELEMENT_MAX);
+ snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
+
+ NETDATA_DOUBLE value = rrdvar2number(rv);
+ add_variable(connector_specific_data->write_request, name,
+ (host == localhost) ? instance->config.hostname : rrdhost_hostname(host), value, opts->now / USEC_PER_MS);
+
+ return 0;
+}
+
+/**
+ * Format host variables for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @param host a data collecting host.
+ * @return Always returns 0.
+ */
+int format_variables_prometheus_remote_write(struct instance *instance, RRDHOST *host)
+{
+ struct prometheus_remote_write_variables_callback_options opt = {
+ .host = host,
+ .instance = instance,
+ .now = now_realtime_usec(),
+ };
+
+ return rrdvar_walkthrough_read(host->rrdvars, format_variable_prometheus_remote_write_callback, &opt);
+}
+
+/**
+ * Format a batch for Prometheus Remote Write connector
+ *
+ * @param instance an instance data structure.
+ * @return Returns 0 on success, 1 on failure.
+ */
+int format_batch_prometheus_remote_write(struct instance *instance)
+{
+ struct simple_connector_data *simple_connector_data =
+ (struct simple_connector_data *)instance->connector_specific_data;
+ struct prometheus_remote_write_specific_data *connector_specific_data =
+ (struct prometheus_remote_write_specific_data *)simple_connector_data->connector_specific_data;
+
+ size_t data_size = get_write_request_size(connector_specific_data->write_request);
+
+ if (unlikely(!data_size)) {
+ netdata_log_error("EXPORTING: write request size is out of range");
+ return 1;
+ }
+
+ BUFFER *buffer = instance->buffer;
+
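+    // pack the accumulated write request into the instance buffer, which the simple
+    // connector hands off for sending in simple_connector_end_batch() below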
+ buffer_need_bytes(buffer, data_size);
+ if (unlikely(pack_and_clear_write_request(connector_specific_data->write_request, buffer->buffer, &data_size))) {
+ netdata_log_error("EXPORTING: cannot pack write request");
+ return 1;
+ }
+ buffer->len = data_size;
+
+ simple_connector_end_batch(instance);
+
+ return 0;
+}
diff --git a/exporting/prometheus/remote_write/remote_write.h b/src/exporting/prometheus/remote_write/remote_write.h
index d4e86494b..d4e86494b 100644
--- a/exporting/prometheus/remote_write/remote_write.h
+++ b/src/exporting/prometheus/remote_write/remote_write.h
diff --git a/exporting/prometheus/remote_write/remote_write.proto b/src/exporting/prometheus/remote_write/remote_write.proto
index dfde254e1..dfde254e1 100644
--- a/exporting/prometheus/remote_write/remote_write.proto
+++ b/src/exporting/prometheus/remote_write/remote_write.proto
diff --git a/exporting/prometheus/remote_write/remote_write_request.cc b/src/exporting/prometheus/remote_write/remote_write_request.cc
index a628082d1..a628082d1 100644
--- a/exporting/prometheus/remote_write/remote_write_request.cc
+++ b/src/exporting/prometheus/remote_write/remote_write_request.cc
diff --git a/exporting/prometheus/remote_write/remote_write_request.h b/src/exporting/prometheus/remote_write/remote_write_request.h
index b25370133..b25370133 100644
--- a/exporting/prometheus/remote_write/remote_write_request.h
+++ b/src/exporting/prometheus/remote_write/remote_write_request.h
diff --git a/exporting/pubsub/README.md b/src/exporting/pubsub/README.md
index 8633f1725..8633f1725 120000
--- a/exporting/pubsub/README.md
+++ b/src/exporting/pubsub/README.md
diff --git a/exporting/pubsub/integrations/google_cloud_pub_sub.md b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md
index c24833146..1adfd408e 100644
--- a/exporting/pubsub/integrations/google_cloud_pub_sub.md
+++ b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/pubsub/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/pubsub/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/pubsub/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/exporting/pubsub/metadata.yaml"
sidebar_label: "Google Cloud Pub Sub"
learn_status: "Published"
-learn_rel_path: "Exporting"
+learn_rel_path: "Exporting Metrics"
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
endmeta-->
@@ -39,7 +39,7 @@ The configuration file name for this integration is `exporting.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -50,7 +50,7 @@ sudo ./edit-config exporting.conf
The following options can be defined for this exporter.
-<details><summary>Config options</summary>
+<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
@@ -64,7 +64,7 @@ The following options can be defined for this exporter.
| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
+| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
diff --git a/src/exporting/pubsub/metadata.yaml b/src/exporting/pubsub/metadata.yaml
new file mode 100644
index 000000000..48ec02c47
--- /dev/null
+++ b/src/exporting/pubsub/metadata.yaml
@@ -0,0 +1,152 @@
+# yamllint disable rule:line-length
+---
+id: 'export-google-pubsub'
+meta:
+ name: 'Google Cloud Pub Sub'
+ link: 'https://cloud.google.com/pubsub'
+ categories:
+ - export
+ icon_filename: 'pubsub.png'
+keywords:
+ - exporter
+ - Google Cloud
+ - Pub Sub
+overview:
+ exporter_description: |
+ Export metrics to Google Cloud Pub/Sub Service
+ exporter_limitations: ''
+setup:
+ prerequisites:
+ list:
+ - title: ''
+ description: |
+      - First, [install](https://github.com/googleapis/google-cloud-cpp/) the Google Cloud Platform C++ Client Libraries
+      - Pub/Sub support also depends on those libraries' dependencies, such as `protobuf`, `protoc`, and `grpc`
+      - Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.
+ configuration:
+ file:
+ name: 'exporting.conf'
+ options:
+ description: |
+ The following options can be defined for this exporter.
+ folding:
+ title: 'Config options'
+ enabled: true
+ list:
+ - name: 'enabled'
+ default_value: 'no'
+ description: 'Enables or disables an exporting connector instance (yes|no).'
+ required: true
+ - name: 'destination'
+ default_value: 'pubsub.googleapis.com'
+ description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
+ required: true
+ detailed_description: |
+        The format of each item in this list is: [PROTOCOL:]IP[:PORT].
+        - PROTOCOL can be udp or tcp. tcp is the default and the only one supported by the current exporting engine.
+        - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.
+        - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
+
+        Example:
+ ```yaml
+ destination = pubsub.googleapis.com
+ ```
+ When multiple servers are defined, Netdata will try the next one when the previous one fails.
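+
+        A sketch of such a space separated fallback list (the second endpoint is hypothetical):
+        ```yaml
+        destination = pubsub.googleapis.com backup.example.com:443
+        ```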
+ - name: 'username'
+ default_value: 'my_username'
+ description: 'Username for HTTP authentication'
+ required: false
+ - name: 'password'
+ default_value: 'my_password'
+ description: 'Password for HTTP authentication'
+ required: false
+ - name: 'data source'
+ default_value: ''
+ description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
+ required: false
+ - name: 'hostname'
+ default_value: '[global].hostname'
+ description: 'The hostname to be used for sending data to the external database server.'
+ required: false
+ - name: 'prefix'
+ default_value: 'Netdata'
+ description: 'The prefix to add to all metrics.'
+ required: false
+ - name: 'update every'
+ default_value: '10'
+ description: |
+        Frequency of sending data to the external database, in seconds.
+ required: false
+ detailed_description: |
+ Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
+ send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
+ - name: 'buffer on failures'
+ default_value: '10'
+ description: |
+ The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
+ required: false
+ detailed_description: |
+ If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
+ - name: 'timeout ms'
+ default_value: '2 * update_every * 1000'
+ description: 'The timeout in milliseconds to wait for the external database server to process the data.'
+ required: false
+ - name: 'send hosts matching'
+ default_value: 'localhost *'
+ description: |
+ Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#simple-patterns).
+ required: false
+ detailed_description: |
+ Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
+ The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
+ filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
+
+ A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
+ use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
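+
+        For example, the negative match described above as a configuration line:
+        ```yaml
+        send hosts matching = !*child* *db*
+        ```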
+ - name: 'send charts matching'
+ default_value: '*'
+ description: |
+ One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
+ required: false
+ detailed_description: |
+ A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
+ use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
+ positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
+ has a higher priority than the configuration option.
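+
+        As a sketch, the example above as a configuration line:
+        ```yaml
+        send charts matching = !*reads apps.*
+        ```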
+ - name: 'send names instead of ids'
+ default_value: ''
+ description: 'Controls the metric names Netdata should send to the external database (yes|no).'
+ required: false
+ detailed_description: |
+ Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
+ are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
+        different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
+ - name: 'send configured labels'
+ default_value: ''
+ description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
+ required: false
+ - name: 'send automatic labels'
+ default_value: ''
+ description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list:
+ - name: 'Basic configuration'
+ folding:
+ enabled: false
+ description: |
+ - Set the destination option to a Pub/Sub service endpoint. pubsub.googleapis.com is the default one.
+ - Create the credentials JSON file by following Google Cloud's authentication guide.
+      - The user running the Agent (typically netdata) needs read access to google_cloud_credentials.json, which you can grant with
+ `chmod 400 google_cloud_credentials.json; chown netdata google_cloud_credentials.json`
+ - Set the credentials file option to the full path of the file.
+ config: |
+ [pubsub:my_instance]
+ enabled = yes
+ destination = pubsub.googleapis.com
+ credentials file = /etc/netdata/google_cloud_credentials.json
+ project id = my_project
+ topic id = my_topic
diff --git a/exporting/pubsub/pubsub.c b/src/exporting/pubsub/pubsub.c
index 4989160a4..7fc416258 100644
--- a/exporting/pubsub/pubsub.c
+++ b/src/exporting/pubsub/pubsub.c
@@ -99,6 +99,10 @@ void pubsub_connector_worker(void *instance_p)
struct pubsub_specific_config *connector_specific_config = instance->config.connector_specific_config;
struct pubsub_specific_data *connector_specific_data = instance->connector_specific_data;
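+ // name the worker thread after the connector instance so it is identifiable in process listings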
+ char threadname[ND_THREAD_TAG_MAX + 1];
+ snprintfz(threadname, ND_THREAD_TAG_MAX, "EXPPBSB[%zu]", instance->index);
+ uv_thread_set_name_np(threadname);
+
while (!instance->engine->exit) {
struct stats *stats = &instance->stats;
char error_message[ERROR_LINE_MAX + 1] = "";
diff --git a/exporting/pubsub/pubsub.h b/src/exporting/pubsub/pubsub.h
index 0bcb76f9b..0bcb76f9b 100644
--- a/exporting/pubsub/pubsub.h
+++ b/src/exporting/pubsub/pubsub.h
diff --git a/exporting/pubsub/pubsub_publish.cc b/src/exporting/pubsub/pubsub_publish.cc
index cc14154f8..cc14154f8 100644
--- a/exporting/pubsub/pubsub_publish.cc
+++ b/src/exporting/pubsub/pubsub_publish.cc
diff --git a/exporting/pubsub/pubsub_publish.h b/src/exporting/pubsub/pubsub_publish.h
index 567a262f0..567a262f0 100644
--- a/exporting/pubsub/pubsub_publish.h
+++ b/src/exporting/pubsub/pubsub_publish.h
diff --git a/exporting/read_config.c b/src/exporting/read_config.c
index cd8af6bf6..cd8af6bf6 100644
--- a/exporting/read_config.c
+++ b/src/exporting/read_config.c
diff --git a/exporting/sample-metadata.yaml b/src/exporting/sample-metadata.yaml
index 41a287aeb..41a287aeb 100644
--- a/exporting/sample-metadata.yaml
+++ b/src/exporting/sample-metadata.yaml
diff --git a/exporting/send_data.c b/src/exporting/send_data.c
index e8b8aaf60..b79f0a3e3 100644
--- a/exporting/send_data.c
+++ b/src/exporting/send_data.c
@@ -31,7 +31,7 @@ static int exporting_tls_is_enabled(EXPORTING_CONNECTOR_TYPE type __maybe_unused
* @return Always returns 0.
*/
int exporting_discard_response(BUFFER *buffer, struct instance *instance) {
-#if NETDATA_INTERNAL_CHECKS
+#ifdef NETDATA_INTERNAL_CHECKS
char sample[1024];
const char *s = buffer_tostring(buffer);
char *d = sample, *e = &sample[sizeof(sample) - 1];
@@ -217,6 +217,10 @@ void simple_connector_worker(void *instance_p)
struct instance *instance = (struct instance*)instance_p;
struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+ char threadname[ND_THREAD_TAG_MAX + 1];
+ snprintfz(threadname, ND_THREAD_TAG_MAX, "EXPSMPL[%zu]", instance->index);
+ uv_thread_set_name_np(threadname);
+
#ifdef ENABLE_HTTPS
uint32_t options = (uint32_t)instance->config.options;
@@ -391,7 +395,7 @@ void simple_connector_worker(void *instance_p)
#endif
}
-#if ENABLE_PROMETHEUS_REMOTE_WRITE
+#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
if (instance->config.type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE)
clean_prometheus_remote_write(instance);
#endif
diff --git a/exporting/send_internal_metrics.c b/src/exporting/send_internal_metrics.c
index 677a57bbb..677a57bbb 100644
--- a/exporting/send_internal_metrics.c
+++ b/src/exporting/send_internal_metrics.c