Diffstat (limited to 'src/exporting')
-rw-r--r--  src/exporting/README.md                                           | 102
-rw-r--r--  src/exporting/TIMESCALE.md                                        |  19
-rw-r--r--  src/exporting/WALKTHROUGH.md                                      |  14
-rw-r--r--  src/exporting/aws_kinesis/integrations/aws_kinesis.md             |   4
-rw-r--r--  src/exporting/clean_connectors.c                                  |   2
-rw-r--r--  src/exporting/exporting_engine.c                                  |   7
-rw-r--r--  src/exporting/exporting_engine.h                                  |   2
-rw-r--r--  src/exporting/graphite/graphite.c                                 |   2
-rw-r--r--  src/exporting/graphite/integrations/blueflood.md                  |   4
-rw-r--r--  src/exporting/graphite/integrations/graphite.md                   |   4
-rw-r--r--  src/exporting/graphite/integrations/influxdb.md                   |   4
-rw-r--r--  src/exporting/graphite/integrations/kairosdb.md                   |   4
-rw-r--r--  src/exporting/json/integrations/json.md                           |   4
-rw-r--r--  src/exporting/json/json.c                                         |   2
-rw-r--r--  src/exporting/mongodb/integrations/mongodb.md                     |   6
-rw-r--r--  src/exporting/mongodb/metadata.yaml                               |   2
-rw-r--r--  src/exporting/opentsdb/integrations/opentsdb.md                   |   4
-rw-r--r--  src/exporting/opentsdb/opentsdb.c                                 |   4
-rw-r--r--  src/exporting/prometheus/README.md                                |   6
-rw-r--r--  src/exporting/prometheus/integrations/appoptics.md                |   4
-rw-r--r--  src/exporting/prometheus/integrations/azure_data_explorer.md      |   4
-rw-r--r--  src/exporting/prometheus/integrations/azure_event_hub.md          |   4
-rw-r--r--  src/exporting/prometheus/integrations/chronix.md                  |   4
-rw-r--r--  src/exporting/prometheus/integrations/cortex.md                   |   4
-rw-r--r--  src/exporting/prometheus/integrations/cratedb.md                  |   4
-rw-r--r--  src/exporting/prometheus/integrations/elasticsearch.md            |   4
-rw-r--r--  src/exporting/prometheus/integrations/gnocchi.md                  |   4
-rw-r--r--  src/exporting/prometheus/integrations/google_bigquery.md          |   4
-rw-r--r--  src/exporting/prometheus/integrations/greptimedb.md               |   4
-rw-r--r--  src/exporting/prometheus/integrations/irondb.md                   |   4
-rw-r--r--  src/exporting/prometheus/integrations/kafka.md                    |   4
-rw-r--r--  src/exporting/prometheus/integrations/m3db.md                     |   4
-rw-r--r--  src/exporting/prometheus/integrations/metricfire.md               |   4
-rw-r--r--  src/exporting/prometheus/integrations/new_relic.md                |   4
-rw-r--r--  src/exporting/prometheus/integrations/opeansearch.md              |   4
-rw-r--r--  src/exporting/prometheus/integrations/postgresql.md               |   4
-rw-r--r--  src/exporting/prometheus/integrations/prometheus_remote_write.md  |   4
-rw-r--r--  src/exporting/prometheus/integrations/quasardb.md                 |   4
-rw-r--r--  src/exporting/prometheus/integrations/splunk_signalfx.md          |   4
-rw-r--r--  src/exporting/prometheus/integrations/thanos.md                   |   4
-rw-r--r--  src/exporting/prometheus/integrations/tikv.md                     |   4
-rw-r--r--  src/exporting/prometheus/integrations/timescaledb.md              |   4
-rw-r--r--  src/exporting/prometheus/integrations/victoriametrics.md          |   4
-rw-r--r--  src/exporting/prometheus/integrations/vmware_aria.md              |   4
-rw-r--r--  src/exporting/prometheus/integrations/wavefront.md                |   4
-rw-r--r--  src/exporting/prometheus/prometheus.c                             |  71
-rw-r--r--  src/exporting/prometheus/prometheus.h                             |   4
-rw-r--r--  src/exporting/prometheus/remote_write/remote_write.c              |  26
-rw-r--r--  src/exporting/pubsub/integrations/google_cloud_pub_sub.md         |   4
-rw-r--r--  src/exporting/read_config.c                                       |  19
-rw-r--r--  src/exporting/send_data.c                                         |  24
51 files changed, 178 insertions, 270 deletions
diff --git a/src/exporting/README.md b/src/exporting/README.md
index 83b391f72..a626ee66b 100644
--- a/src/exporting/README.md
+++ b/src/exporting/README.md
@@ -1,13 +1,3 @@
-<!--
-title: "Exporting reference"
-description: "With the exporting engine, you can archive your Netdata metrics to multiple external databases for long-term storage or further analysis."
-sidebar_label: "Export"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/README.md"
-learn_status: "Published"
-learn_rel_path: "Integrations/Export"
-learn_doc_purpose: "Explain the exporting engine options and all of our the exporting connectors options"
--->
-
# Exporting reference
Welcome to the exporting engine reference guide. This guide contains comprehensive information about enabling,
@@ -18,7 +8,7 @@ For a quick introduction to the exporting engine's features, read our doc on [ex
databases](/docs/exporting-metrics/README.md), or jump in to [enabling a connector](/docs/exporting-metrics/enable-an-exporting-connector.md).
The exporting engine has a modular structure and supports metric exporting via multiple exporting connector instances at
-the same time. You can have different update intervals and filters configured for every exporting connector instance.
+the same time. You can have different update intervals and filters configured for every exporting connector instance.
When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
restart its process_, not the entire [database of long-term metrics](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md).
@@ -37,24 +27,24 @@ The exporting engine uses a number of connectors to send Netdata metrics to exte
[list of supported databases](/docs/exporting-metrics/README.md#supported-databases) for information on which
connector to enable and configure for your database of choice.
-- [**AWS Kinesis Data Streams**](/src/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
+- [**AWS Kinesis Data Streams**](/src/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
format.
-- [**Google Cloud Pub/Sub Service**](/src/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
+- [**Google Cloud Pub/Sub Service**](/src/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
format.
-- [**Graphite**](/src/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
+- [**Graphite**](/src/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
`prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can
also be configured). Learn more in our guide to [export and visualize Netdata metrics in
Graphite](/src/exporting/graphite/README.md).
-- [**JSON** document databases](/src/exporting/json/README.md)
-- [**OpenTSDB**](/src/exporting/opentsdb/README.md): Use a plaintext or HTTP interfaces. Metrics are sent to
+- [**JSON** document databases](/src/exporting/json/README.md)
+- [**OpenTSDB**](/src/exporting/opentsdb/README.md): Use a plaintext or HTTP interfaces. Metrics are sent to
OpenTSDB as `prefix.chart.dimension` with tag `host=hostname`.
-- [**MongoDB**](/src/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
-- [**Prometheus**](/src/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
+- [**MongoDB**](/src/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
+- [**Prometheus**](/src/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
from node using the Netdata API.
-- [**Prometheus remote write**](/src/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
+- [**Prometheus remote write**](/src/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
buffer encoding over HTTP. Supports many [storage
providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
-- [**TimescaleDB**](/src/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
+- [**TimescaleDB**](/src/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
Netdata client and writes them to a TimescaleDB table.
### Chart filtering
@@ -62,14 +52,14 @@ connector to enable and configure for your database of choice.
Netdata can filter metrics, to send only a subset of the collected metrics. You can use the
configuration file
-```txt
+```text
[prometheus:exporter]
send charts matching = system.*
```
or the URL parameter `filter` in the `allmetrics` API call.
-```txt
+```text
http://localhost:19999/api/v1/allmetrics?format=shell&filter=system.*
```
@@ -77,17 +67,17 @@ http://localhost:19999/api/v1/allmetrics?format=shell&filter=system.*
Netdata supports three modes of operation for all exporting connectors:
-- `as-collected` sends to external databases the metrics as they are collected, in the units they are collected.
+- `as-collected` sends to external databases the metrics as they are collected, in the units they are collected.
So, counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example,
to calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
-- `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
+- `average` sends to external databases normalized metrics from the Netdata database. In this mode, all metrics
are sent as gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but
you will not be able to copy and paste queries from other sources to convert units. For example, CPU utilization
percentage is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage
to the external database.
-- `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
+- `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the external
database. So, if Netdata is configured to send data to the database every 10 seconds, the sum of the 10 values
shown on the Netdata charts will be used.
@@ -102,7 +92,7 @@ see in Netdata, which is not necessarily true for the other modes of operation.
### Independent operation
-This code is smart enough, not to slow down Netdata, independently of the speed of the external database server.
+This code is smart enough, not to slow down Netdata, independently of the speed of the external database server.
> ❗ You should keep in mind though that many exporting connector instances can consume a lot of CPU resources if they
> run their batches at the same time. You can set different update intervals for every exporting connector instance,
@@ -111,12 +101,12 @@ This code is smart enough, not to slow down Netdata, independently of the speed
## Configuration
Here are the configuration blocks for every supported connector. Your current `exporting.conf` file may look a little
-different.
+different.
You can configure each connector individually using the available [options](#options). The
`[graphite:my_graphite_instance]` block contains examples of some of these additional options in action.
-```conf
+```text
[exporting:global]
enabled = yes
send configured labels = no
@@ -192,23 +182,23 @@ You can configure each connector individually using the available [options](#opt
### Sections
-- `[exporting:global]` is a section where you can set your defaults for all exporting connectors
-- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
+- `[exporting:global]` is a section where you can set your defaults for all exporting connectors
+- `[prometheus:exporter]` defines settings for Prometheus exporter API queries (e.g.:
`http://NODE:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`).
-- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
- - `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
+- `[<type>:<name>]` keeps settings for a particular exporting connector instance, where:
+- `type` selects the exporting connector type: graphite | opentsdb:telnet | opentsdb:http |
prometheus_remote_write | json | kinesis | pubsub | mongodb. For graphite, opentsdb,
json, and prometheus_remote_write connectors you can also use `:http` or `:https` modifiers
(e.g.: `opentsdb:https`).
- - `name` can be arbitrary instance name you chose.
+- `name` can be arbitrary instance name you chose.
### Options
Configure individual connectors and override any global settings with the following options.
-- `enabled = yes | no`, enables or disables an exporting connector instance
+- `enabled = yes | no`, enables or disables an exporting connector instance
-- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
+- `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
ports to connect to. Netdata will use the **first available** to send the metrics.
The format of each item in this list, is: `[PROTOCOL:]IP[:PORT]`.
@@ -223,13 +213,13 @@ Configure individual connectors and override any global settings with the follow
Example IPv4:
-```conf
+```text
destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
```
Example IPv6 and IPv4 together:
-```conf
+```text
destination = [ffff:...:0001]:2003 10.11.12.1:2003
```
@@ -246,48 +236,48 @@ Configure individual connectors and override any global settings with the follow
For the Pub/Sub exporting connector `destination` can be set to a specific service endpoint.
-- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
+- `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
be sent to the external database.
-- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default this
+- `hostname = my-name`, is the hostname to be used for sending data to the external database server. By default this
is `[global].hostname`.
-- `prefix = Netdata`, is the prefix to add to all metrics.
+- `prefix = Netdata`, is the prefix to add to all metrics.
-- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
+- `update every = 10`, is the number of seconds between sending data to the external database. Netdata will add some
randomness to this number, to prevent stressing the external server when many Netdata servers send data to the same
database. This randomness does not affect the quality of the data, only the time they are sent.
-- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data,
+- `buffer on failures = 10`, is the number of iterations (each iteration is `update every` seconds) to buffer data,
when the external database server is not available. If the server fails to receive the data after that many
failures, data loss on the connector instance is expected (Netdata will also log it).
-- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
+- `timeout ms = 20000`, is the timeout in milliseconds to wait for the external database server to process the data.
By default this is `2 * update_every * 1000`.
-- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
+- `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
`localhost`), allowing us to filter which hosts will be sent to the external database when this Netdata is a central
Netdata aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named
`*db*` except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first
pattern matching the hostname will be used - positive or negative).
-- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
+- `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
positive or negative). There is also a URL parameter `filter` that can be used while querying `allmetrics`. The URL
parameter has a higher priority than the configuration option.
-- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
+- `send names instead of ids = yes | no` controls the metric names Netdata should send to the external database.
Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system
and names are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several
cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-- `send configured labels = yes | no` controls if host labels defined in the `[host labels]` section in `netdata.conf`
+- `send configured labels = yes | no` controls if host labels defined in the `[host labels]` section in `netdata.conf`
should be sent to the external database
-- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`
+- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`
should be sent to the external database
## HTTPS
@@ -302,14 +292,14 @@ HTTPS communication between Netdata and an external database. You can set up a r
Netdata creates five charts in the dashboard, under the **Netdata Monitoring** section, to help you monitor the health
and performance of the exporting engine itself:
-1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
+1. **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
external database server.
-2. **Exporting data size**, the amount of data (in KB) Netdata added the buffer.
+2. **Exporting data size**, the amount of data (in KB) Netdata added the buffer.
-3. **Exporting operations**, the number of operations performed by Netdata.
+3. **Exporting operations**, the number of operations performed by Netdata.
-4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread, that is responsible for sending
+4. **Exporting thread CPU usage**, the CPU resources consumed by the Netdata thread, that is responsible for sending
the metrics to the external database server.
![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
@@ -318,10 +308,8 @@ and performance of the exporting engine itself:
Netdata adds 3 alerts:
-1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
-2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
-3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server
+1. `exporting_last_buffering`, number of seconds since the last successful buffering of exported data
+2. `exporting_metrics_sent`, percentage of metrics sent to the external database server
+3. `exporting_metrics_lost`, number of metrics lost due to repeating failures to contact the external database server
![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
-
-
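
Pulling together the options documented in the README changes above, a minimal `exporting.conf` connector instance might look like the sketch below. The instance name `my_graphite_instance` and the `localhost:2003` destination are placeholders for illustration, not values taken from this patch; all option names and value forms come from the README text itself.

```text
[exporting:global]
    enabled = yes

[graphite:my_graphite_instance]
    enabled = yes
    destination = localhost:2003
    data source = average
    update every = 10
    send hosts matching = localhost *
    send charts matching = system.*
    send names instead of ids = yes
```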
diff --git a/src/exporting/TIMESCALE.md b/src/exporting/TIMESCALE.md
index 3bad28379..179527c14 100644
--- a/src/exporting/TIMESCALE.md
+++ b/src/exporting/TIMESCALE.md
@@ -1,12 +1,3 @@
-<!--
-title: "Writing metrics to TimescaleDB"
-description: "Send Netdata metrics to TimescaleDB for long-term archiving and further analysis."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/exporting/TIMESCALE.md"
-sidebar_label: "Writing metrics to TimescaleDB"
-learn_status: "Published"
-learn_rel_path: "Integrations/Export"
--->
-
# Writing metrics to TimescaleDB
Thanks to Netdata's community of developers and system administrators, and Mahlon Smith
@@ -23,14 +14,18 @@ What's TimescaleDB? Here's how their team defines the project on their [GitHub p
To get started archiving metrics to TimescaleDB right away, check out Mahlon's [`netdata-timescale-relay`
repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub. Please be aware that backends subsystem
was removed and Netdata configuration should be moved to the new `exporting.conf` configuration file. Use
-```conf
+
+```text
[json:my_instance]
```
+
in `exporting.conf` instead of
-```conf
+
+```text
[backend]
type = json
```
+
in `netdata.conf`.
This small program takes JSON streams from a Netdata client and writes them to a PostgreSQL (aka TimescaleDB) table.
@@ -67,5 +62,3 @@ blog](https://blog.timescale.com/blog/writing-it-metrics-from-netdata-to-timesca
Thank you to Mahlon, Rune, TimescaleDB, and the members of the Netdata community that requested and then built this
exporting connection between Netdata and TimescaleDB!
-
-
diff --git a/src/exporting/WALKTHROUGH.md b/src/exporting/WALKTHROUGH.md
index 450789d9d..1b3d255bd 100644
--- a/src/exporting/WALKTHROUGH.md
+++ b/src/exporting/WALKTHROUGH.md
@@ -37,7 +37,7 @@ This stack will offer you visibility into your application and systems performan
To begin let's create our container which we will install Netdata on. We need to run a container, forward the necessary
port that Netdata listens on, and attach a tty so we can interact with the bash shell on the container. But before we do
this we want name resolution between the two containers to work. In order to accomplish this we will create a
-user-defined network and attach both containers to this network. The first command we should run is:
+user-defined network and attach both containers to this network. The first command we should run is:
```sh
docker network create --driver bridge netdata-tutorial
@@ -90,15 +90,15 @@ We will be installing Prometheus in a container for purpose of demonstration. Wh
container I would like to walk through the install process and setup on a fresh container. This will allow anyone
reading to migrate this tutorial to a VM or Server of any sort.
-Let's start another container in the same fashion as we did the Netdata container.
+Let's start another container in the same fashion as we did the Netdata container.
```sh
docker run -it --name prometheus --hostname prometheus \
--network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
-```
+```
This should drop you into a shell once again. Once there quickly install your favorite editor as we will be editing
-files later in this tutorial.
+files later in this tutorial.
```sh
yum install vim -y
@@ -137,7 +137,7 @@ point to talk about Prometheus's data model which can be viewed here: <https://p
As explained we have two key elements in Prometheus metrics. We have the _metric_ and its _labels_. Labels allow for
granularity between metrics. Let's use our previous example to further explain.
-```conf
+```text
netdata_disk_space_GiB_average{chart="disk_space._run",dimension="avail",family="/run",mount_point="/run",filesystem="tmpfs",mount_root="/"} 0.0298195 1684951093000
```
@@ -193,7 +193,7 @@ across a section of metrics with the first comments `# COMMENT homogeneous char
family "cpu", units "percentage"` followed by the metrics. This is a good start now let us drill down to the specific
metric we would like to graph.
-```conf
+```text
# COMMENT
netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1501275951 to 1501275951 inclusive
netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0000000 1501275951000
@@ -256,5 +256,3 @@ deployments automatically register Netdata services into Consul and Prometheus a
achieved you do not have to think about the monitoring system until Prometheus cannot keep up with your scale. Once this
happens there are options presented in the Prometheus documentation for solving this. Hope this was helpful, happy
monitoring.
-
-
diff --git a/src/exporting/aws_kinesis/integrations/aws_kinesis.md b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
index 633729c74..f79c1008a 100644
--- a/src/exporting/aws_kinesis/integrations/aws_kinesis.md
+++ b/src/exporting/aws_kinesis/integrations/aws_kinesis.md
@@ -49,8 +49,8 @@ Export metrics to AWS Kinesis Data Streams
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/clean_connectors.c b/src/exporting/clean_connectors.c
index c850c5ffa..81413e661 100644
--- a/src/exporting/clean_connectors.c
+++ b/src/exporting/clean_connectors.c
@@ -67,9 +67,7 @@ void simple_connector_cleanup(struct instance *instance)
freez(current_buffer);
}
-#ifdef ENABLE_HTTPS
netdata_ssl_close(&simple_connector_data->ssl);
-#endif
freez(simple_connector_data);
diff --git a/src/exporting/exporting_engine.c b/src/exporting/exporting_engine.c
index eb5f8a0a8..7abe0b5ce 100644
--- a/src/exporting/exporting_engine.c
+++ b/src/exporting/exporting_engine.c
@@ -6,7 +6,6 @@ static struct engine *engine = NULL;
void analytics_exporting_connectors_ssl(BUFFER *b)
{
-#ifdef ENABLE_HTTPS
if (netdata_ssl_exporting_ctx) {
for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
@@ -16,7 +15,6 @@ void analytics_exporting_connectors_ssl(BUFFER *b)
}
}
}
-#endif
buffer_strcat(b, "|");
}
@@ -197,12 +195,11 @@ void *exporting_main(void *ptr)
RRDDIM *rd_main_system = NULL;
create_main_rusage_chart(&st_main_rusage, &rd_main_user, &rd_main_system);
- usec_t step_ut = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC);
while (service_running(SERVICE_EXPORTERS)) {
- heartbeat_next(&hb, step_ut);
+ heartbeat_next(&hb);
engine->now = now_realtime_sec();
if (mark_scheduled_instances(engine))
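
The hunk above tracks a heartbeat API change: the tick period is now passed once to `heartbeat_init()`, and `heartbeat_next()` no longer takes a step argument. A minimal sketch of the exporting loop as it reads after this patch; `heartbeat_t`, `heartbeat_init()`, `heartbeat_next()`, `service_running()` and `USEC_PER_SEC` are Netdata internals assumed from the diff, not a standalone program.

```c
/* Sketch of the post-patch loop shape, based only on the hunk above. */
heartbeat_t hb;
heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); /* period fixed at init */

while (service_running(SERVICE_EXPORTERS)) {
    heartbeat_next(&hb);              /* sleep until the next tick of the configured period */
    engine->now = now_realtime_sec(); /* timestamp for this export iteration */
    /* ... mark scheduled instances and dispatch buffered metrics ... */
}
```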
diff --git a/src/exporting/exporting_engine.h b/src/exporting/exporting_engine.h
index beaa0ba87..44a2da322 100644
--- a/src/exporting/exporting_engine.h
+++ b/src/exporting/exporting_engine.h
@@ -124,9 +124,7 @@ struct simple_connector_data {
struct simple_connector_buffer *first_buffer;
struct simple_connector_buffer *last_buffer;
-#ifdef ENABLE_HTTPS
NETDATA_SSL ssl;
-#endif
};
struct prometheus_remote_write_specific_config {
diff --git a/src/exporting/graphite/graphite.c b/src/exporting/graphite/graphite.c
index 1fc1f2b04..a54339892 100644
--- a/src/exporting/graphite/graphite.c
+++ b/src/exporting/graphite/graphite.c
@@ -19,12 +19,10 @@ int init_graphite_instance(struct instance *instance)
struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
instance->connector_specific_data = connector_specific_data;
-#ifdef ENABLE_HTTPS
connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
-#endif
instance->start_batch_formatting = NULL;
instance->start_host_formatting = format_host_labels_graphite_plaintext;
diff --git a/src/exporting/graphite/integrations/blueflood.md b/src/exporting/graphite/integrations/blueflood.md
index 56220fb6a..d65f9503b 100644
--- a/src/exporting/graphite/integrations/blueflood.md
+++ b/src/exporting/graphite/integrations/blueflood.md
@@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/graphite/integrations/graphite.md b/src/exporting/graphite/integrations/graphite.md
index c38b1aac4..0f2b91d16 100644
--- a/src/exporting/graphite/integrations/graphite.md
+++ b/src/exporting/graphite/integrations/graphite.md
@@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/graphite/integrations/influxdb.md b/src/exporting/graphite/integrations/influxdb.md
index 4d49febe0..6af2616a3 100644
--- a/src/exporting/graphite/integrations/influxdb.md
+++ b/src/exporting/graphite/integrations/influxdb.md
@@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/graphite/integrations/kairosdb.md b/src/exporting/graphite/integrations/kairosdb.md
index d5dad7f42..c2bd27bb8 100644
--- a/src/exporting/graphite/integrations/kairosdb.md
+++ b/src/exporting/graphite/integrations/kairosdb.md
@@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/json/integrations/json.md b/src/exporting/json/integrations/json.md
index 0b17aa318..b6d872492 100644
--- a/src/exporting/json/integrations/json.md
+++ b/src/exporting/json/integrations/json.md
@@ -36,8 +36,8 @@ further analysis, or correlation with data from other sources
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/json/json.c b/src/exporting/json/json.c
index e9c4db635..d696c7358 100644
--- a/src/exporting/json/json.c
+++ b/src/exporting/json/json.c
@@ -70,12 +70,10 @@ int init_json_http_instance(struct instance *instance)
struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
instance->connector_specific_data = connector_specific_data;
-#ifdef ENABLE_HTTPS
connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
-#endif
instance->start_batch_formatting = open_batch_json_http;
instance->start_host_formatting = format_host_labels_json_plaintext;
diff --git a/src/exporting/mongodb/integrations/mongodb.md b/src/exporting/mongodb/integrations/mongodb.md
index c32ff5ee1..9d333bd09 100644
--- a/src/exporting/mongodb/integrations/mongodb.md
+++ b/src/exporting/mongodb/integrations/mongodb.md
@@ -26,7 +26,7 @@ for long-term storage, further analysis, or correlation with data from other sou
####
-- To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.
+- To use MongoDB as an external storage for long-term archiving, you should first [install](https://www.mongodb.com/docs/languages/c/c-driver/current/libmongoc/tutorials/obtaining-libraries/installing/#std-label-installing) libmongoc 1.7.0 or higher.
- Next, re-install Netdata from the source, which detects that the required library is now available.
@@ -38,8 +38,8 @@ for long-term storage, further analysis, or correlation with data from other sou
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/mongodb/metadata.yaml b/src/exporting/mongodb/metadata.yaml
index 87aafc02d..6597df714 100644
--- a/src/exporting/mongodb/metadata.yaml
+++ b/src/exporting/mongodb/metadata.yaml
@@ -20,7 +20,7 @@ setup:
list:
- title: ''
description: |
- - To use MongoDB as an external storage for long-term archiving, you should first [install](http://mongoc.org/libmongoc/current/installing.html) libmongoc 1.7.0 or higher.
+ - To use MongoDB as an external storage for long-term archiving, you should first [install](https://www.mongodb.com/docs/languages/c/c-driver/current/libmongoc/tutorials/obtaining-libraries/installing/#std-label-installing) libmongoc 1.7.0 or higher.
- Next, re-install Netdata from the source, which detects that the required library is now available.
configuration:
file:
diff --git a/src/exporting/opentsdb/integrations/opentsdb.md b/src/exporting/opentsdb/integrations/opentsdb.md
index ddf8cdf25..6538e0c35 100644
--- a/src/exporting/opentsdb/integrations/opentsdb.md
+++ b/src/exporting/opentsdb/integrations/opentsdb.md
@@ -37,8 +37,8 @@ further analysis, or correlation with data from other sources.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/opentsdb/opentsdb.c b/src/exporting/opentsdb/opentsdb.c
index ab4495cb2..bee0f443a 100644
--- a/src/exporting/opentsdb/opentsdb.c
+++ b/src/exporting/opentsdb/opentsdb.c
@@ -20,12 +20,10 @@ int init_opentsdb_telnet_instance(struct instance *instance)
struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
instance->connector_specific_data = connector_specific_data;
-#ifdef ENABLE_HTTPS
connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
-#endif
instance->start_batch_formatting = NULL;
instance->start_host_formatting = format_host_labels_opentsdb_telnet;
@@ -75,12 +73,10 @@ int init_opentsdb_http_instance(struct instance *instance)
connector_specific_config->default_port = 4242;
struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
-#ifdef ENABLE_HTTPS
connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
-#endif
instance->connector_specific_data = connector_specific_data;
instance->start_batch_formatting = open_batch_json_http;
diff --git a/src/exporting/prometheus/README.md b/src/exporting/prometheus/README.md
index 81e62b7ec..cc35b1d42 100644
--- a/src/exporting/prometheus/README.md
+++ b/src/exporting/prometheus/README.md
@@ -203,7 +203,7 @@ interrupts, QoS classes, statsd synthetic charts, etc.
The default is controlled in `exporting.conf`:
-```conf
+```text
[prometheus:exporter]
send names instead of ids = yes | no
```
@@ -217,7 +217,7 @@ You can overwrite it from Prometheus, by appending to the URL:
Netdata can filter the metrics it sends to Prometheus with this setting:
-```conf
+```text
[prometheus:exporter]
send charts matching = *
```
@@ -233,7 +233,7 @@ used.
Netdata sends all metrics prefixed with `netdata_`. You can change this in `netdata.conf`, like this:
-```conf
+```text
[prometheus:exporter]
prefix = netdata
```
diff --git a/src/exporting/prometheus/integrations/appoptics.md b/src/exporting/prometheus/integrations/appoptics.md
index 73ed5c843..35aabe56e 100644
--- a/src/exporting/prometheus/integrations/appoptics.md
+++ b/src/exporting/prometheus/integrations/appoptics.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/azure_data_explorer.md b/src/exporting/prometheus/integrations/azure_data_explorer.md
index 8acbef88a..3df0a700c 100644
--- a/src/exporting/prometheus/integrations/azure_data_explorer.md
+++ b/src/exporting/prometheus/integrations/azure_data_explorer.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/azure_event_hub.md b/src/exporting/prometheus/integrations/azure_event_hub.md
index 42e2a0515..ac643988c 100644
--- a/src/exporting/prometheus/integrations/azure_event_hub.md
+++ b/src/exporting/prometheus/integrations/azure_event_hub.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/chronix.md b/src/exporting/prometheus/integrations/chronix.md
index c7d315b79..519851a2b 100644
--- a/src/exporting/prometheus/integrations/chronix.md
+++ b/src/exporting/prometheus/integrations/chronix.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/cortex.md b/src/exporting/prometheus/integrations/cortex.md
index 91fe3946d..4ab5122dc 100644
--- a/src/exporting/prometheus/integrations/cortex.md
+++ b/src/exporting/prometheus/integrations/cortex.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/cratedb.md b/src/exporting/prometheus/integrations/cratedb.md
index 87f30bc79..5d79e41ee 100644
--- a/src/exporting/prometheus/integrations/cratedb.md
+++ b/src/exporting/prometheus/integrations/cratedb.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/elasticsearch.md b/src/exporting/prometheus/integrations/elasticsearch.md
index 42fac5f67..b900691cb 100644
--- a/src/exporting/prometheus/integrations/elasticsearch.md
+++ b/src/exporting/prometheus/integrations/elasticsearch.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/gnocchi.md b/src/exporting/prometheus/integrations/gnocchi.md
index 457adefc8..154edfcda 100644
--- a/src/exporting/prometheus/integrations/gnocchi.md
+++ b/src/exporting/prometheus/integrations/gnocchi.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/google_bigquery.md b/src/exporting/prometheus/integrations/google_bigquery.md
index c9cb54cc7..7331bb737 100644
--- a/src/exporting/prometheus/integrations/google_bigquery.md
+++ b/src/exporting/prometheus/integrations/google_bigquery.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/greptimedb.md b/src/exporting/prometheus/integrations/greptimedb.md
index cf1453eeb..d660b9c8c 100644
--- a/src/exporting/prometheus/integrations/greptimedb.md
+++ b/src/exporting/prometheus/integrations/greptimedb.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/irondb.md b/src/exporting/prometheus/integrations/irondb.md
index 6ab7c8f06..9a6e387e8 100644
--- a/src/exporting/prometheus/integrations/irondb.md
+++ b/src/exporting/prometheus/integrations/irondb.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/kafka.md b/src/exporting/prometheus/integrations/kafka.md
index 207f292ff..2c315c60c 100644
--- a/src/exporting/prometheus/integrations/kafka.md
+++ b/src/exporting/prometheus/integrations/kafka.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/m3db.md b/src/exporting/prometheus/integrations/m3db.md
index 75ff05b5d..7b5621cfc 100644
--- a/src/exporting/prometheus/integrations/m3db.md
+++ b/src/exporting/prometheus/integrations/m3db.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/metricfire.md b/src/exporting/prometheus/integrations/metricfire.md
index 8e8797ca9..1ff9619f5 100644
--- a/src/exporting/prometheus/integrations/metricfire.md
+++ b/src/exporting/prometheus/integrations/metricfire.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/new_relic.md b/src/exporting/prometheus/integrations/new_relic.md
index 7ecedd497..11fbb3345 100644
--- a/src/exporting/prometheus/integrations/new_relic.md
+++ b/src/exporting/prometheus/integrations/new_relic.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/opeansearch.md b/src/exporting/prometheus/integrations/opeansearch.md
index 77c494284..a76e738ab 100644
--- a/src/exporting/prometheus/integrations/opeansearch.md
+++ b/src/exporting/prometheus/integrations/opeansearch.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/postgresql.md b/src/exporting/prometheus/integrations/postgresql.md
index 4a899b5d4..94ace1dcf 100644
--- a/src/exporting/prometheus/integrations/postgresql.md
+++ b/src/exporting/prometheus/integrations/postgresql.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/prometheus_remote_write.md b/src/exporting/prometheus/integrations/prometheus_remote_write.md
index 6b073d511..296e7fa0a 100644
--- a/src/exporting/prometheus/integrations/prometheus_remote_write.md
+++ b/src/exporting/prometheus/integrations/prometheus_remote_write.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/quasardb.md b/src/exporting/prometheus/integrations/quasardb.md
index 4682f0800..a570e949c 100644
--- a/src/exporting/prometheus/integrations/quasardb.md
+++ b/src/exporting/prometheus/integrations/quasardb.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/splunk_signalfx.md b/src/exporting/prometheus/integrations/splunk_signalfx.md
index 792808817..f464e19b8 100644
--- a/src/exporting/prometheus/integrations/splunk_signalfx.md
+++ b/src/exporting/prometheus/integrations/splunk_signalfx.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/thanos.md b/src/exporting/prometheus/integrations/thanos.md
index de61e29a6..e0ddcd8db 100644
--- a/src/exporting/prometheus/integrations/thanos.md
+++ b/src/exporting/prometheus/integrations/thanos.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/tikv.md b/src/exporting/prometheus/integrations/tikv.md
index 74a62938c..1d2dbfc0c 100644
--- a/src/exporting/prometheus/integrations/tikv.md
+++ b/src/exporting/prometheus/integrations/tikv.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/timescaledb.md b/src/exporting/prometheus/integrations/timescaledb.md
index 56a8fd49b..9d173e752 100644
--- a/src/exporting/prometheus/integrations/timescaledb.md
+++ b/src/exporting/prometheus/integrations/timescaledb.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/victoriametrics.md b/src/exporting/prometheus/integrations/victoriametrics.md
index c2667ea73..57dfebbd7 100644
--- a/src/exporting/prometheus/integrations/victoriametrics.md
+++ b/src/exporting/prometheus/integrations/victoriametrics.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/vmware_aria.md b/src/exporting/prometheus/integrations/vmware_aria.md
index 6015c398e..36c488f69 100644
--- a/src/exporting/prometheus/integrations/vmware_aria.md
+++ b/src/exporting/prometheus/integrations/vmware_aria.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/integrations/wavefront.md b/src/exporting/prometheus/integrations/wavefront.md
index 1803d30a6..58ae31c84 100644
--- a/src/exporting/prometheus/integrations/wavefront.md
+++ b/src/exporting/prometheus/integrations/wavefront.md
@@ -42,8 +42,8 @@ The remote write exporting connector does not support buffer on failures.
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/exporting/prometheus/prometheus.c b/src/exporting/prometheus/prometheus.c
index 0ba83a939..92d2fe64b 100644
--- a/src/exporting/prometheus/prometheus.c
+++ b/src/exporting/prometheus/prometheus.c
@@ -88,9 +88,8 @@ static netdata_mutex_t prometheus_server_root_mutex = NETDATA_MUTEX_INITIALIZER;
*/
void prometheus_clean_server_root()
{
+ netdata_mutex_lock(&prometheus_server_root_mutex);
if (prometheus_server_root) {
- netdata_mutex_lock(&prometheus_server_root_mutex);
-
struct prometheus_server *ps;
for (ps = prometheus_server_root; ps; ) {
struct prometheus_server *current = ps;
@@ -101,8 +100,8 @@ void prometheus_clean_server_root()
freez(current);
}
prometheus_server_root = NULL;
- netdata_mutex_unlock(&prometheus_server_root_mutex);
}
+ netdata_mutex_unlock(&prometheus_server_root_mutex);
}
/**
@@ -149,24 +148,11 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
*
* @param d a destination string.
* @param s a source string.
- * @param usable the number of characters to copy.
+ * @param size the number of characters to copy.
* @return Returns the length of the copied string.
*/
-inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
-{
- size_t n;
-
- for (n = 0; *s && n < usable; d++, s++, n++) {
- register char c = *s;
-
- if (!isalnum(c))
- *d = '_';
- else
- *d = c;
- }
- *d = '\0';
-
- return n;
+inline void prometheus_name_copy(char *d, const char *s, size_t size) {
+ prometheus_rrdlabels_sanitize_name(d, s, size);
}
/**
@@ -174,28 +160,13 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
*
* @param d a destination string.
* @param s a source string.
- * @param usable the number of characters to copy.
+ * @param size the number of characters to copy.
* @return Returns the length of the copied string.
*/
-inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
-{
- size_t n;
-
- // make sure we can escape one character without overflowing the buffer
- usable--;
-
- for (n = 0; *s && n < usable; d++, s++, n++) {
- register char c = *s;
-
- if (unlikely(c == '"' || c == '\\' || c == '\n')) {
- *d++ = '\\';
- n++;
- }
- *d = c;
- }
- *d = '\0';
-
- return n;
+inline void prometheus_label_copy(char *d, const char *s, size_t size) {
+ // our label values are already compatible with prometheus label values
+ // so, just copy them
+ strncpyz(d, s, size - 1);
}
/**
@@ -299,8 +270,8 @@ static int format_prometheus_label_callback(const char *name, const char *value,
char k[PROMETHEUS_ELEMENT_MAX + 1];
char v[PROMETHEUS_ELEMENT_MAX + 1];
- prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX);
- prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(k, name, sizeof(k));
+ prometheus_label_copy(v, value, sizeof(v));
if (*k && *v) {
if (d->count > 0) buffer_strcat(d->instance->labels_buffer, ",");
@@ -341,8 +312,8 @@ static int format_prometheus_chart_label_callback(const char *name, const char *
char k[PROMETHEUS_ELEMENT_MAX + 1];
char v[PROMETHEUS_ELEMENT_MAX + 1];
- prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX);
- prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(k, name, sizeof(k));
+ prometheus_label_copy(v, value, sizeof(v));
if (*k && *v)
buffer_sprintf(wb, ",%s=\"%s\"", k, v);
@@ -630,9 +601,9 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data)
prometheus_label_copy(chart,
(output_options & PROMETHEUS_OUTPUT_NAMES && st->name) ?
- rrdset_name(st) : rrdset_id(st), PROMETHEUS_ELEMENT_MAX);
- prometheus_label_copy(family, rrdset_family(st), PROMETHEUS_ELEMENT_MAX);
- prometheus_name_copy(context, rrdset_context(st), PROMETHEUS_ELEMENT_MAX);
+ rrdset_name(st) : rrdset_id(st), sizeof(chart));
+ prometheus_label_copy(family, rrdset_family(st), sizeof(family));
+ prometheus_name_copy(context, rrdset_context(st), sizeof(context));
int as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(opts->exporting_options)
== EXPORTING_SOURCE_DATA_AS_COLLECTED);
@@ -708,7 +679,7 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data)
prometheus_label_copy(
dimension,
(output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
- PROMETHEUS_ELEMENT_MAX);
+ sizeof(dimension));
}
else {
// the dimensions of the chart, do not have the same algorithm, multiplier or divisor
@@ -717,7 +688,7 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data)
prometheus_name_copy(
dimension,
(output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
- PROMETHEUS_ELEMENT_MAX);
+ sizeof(dimension));
}
generate_as_collected_from_metric(wb, &p, homogeneous, prometheus_collector, st->rrdlabels);
}
@@ -738,7 +709,7 @@ static int prometheus_rrdset_to_json(RRDSET *st, void *data)
prometheus_label_copy(
dimension,
(output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
- PROMETHEUS_ELEMENT_MAX);
+ sizeof(dimension));
if (opts->output_options & PROMETHEUS_OUTPUT_HELP_TYPE) {
generate_as_collected_prom_help(wb, prefix, context, units, suffix, st);
@@ -837,7 +808,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true);
char hostname[PROMETHEUS_ELEMENT_MAX + 1];
- prometheus_label_copy(hostname, rrdhost_hostname(host), PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(hostname, rrdhost_hostname(host), sizeof(hostname));
format_host_labels_prometheus(instance, host);
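The prometheus.c hunks above make two small but distinct changes: the cleanup routine now takes the server-list mutex before it even inspects the list head, and the name/label copy helpers are given the real destination size via `sizeof()`. As a hedged illustration of the first point, here is a minimal standalone sketch of the lock-before-check pattern, using `pthread_mutex_t` and a bare `struct server` as stand-ins for Netdata's `netdata_mutex_t` and `struct prometheus_server`:

```c
#include <pthread.h>
#include <stdlib.h>

struct server { struct server *next; };

static struct server *server_root = NULL;
static pthread_mutex_t server_root_mutex = PTHREAD_MUTEX_INITIALIZER;

void clean_server_root(void) {
    pthread_mutex_lock(&server_root_mutex);    /* lock before looking at the shared head */
    for (struct server *s = server_root; s; ) {
        struct server *current = s;
        s = s->next;
        free(current);
    }
    server_root = NULL;
    pthread_mutex_unlock(&server_root_mutex);  /* one unlock on every path */
}
```

Taking the lock unconditionally costs one lock/unlock pair even when the list is empty, but the check and the teardown can no longer interleave with a concurrent registration.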
diff --git a/src/exporting/prometheus/prometheus.h b/src/exporting/prometheus/prometheus.h
index 0a537fd77..f5b0e4874 100644
--- a/src/exporting/prometheus/prometheus.h
+++ b/src/exporting/prometheus/prometheus.h
@@ -27,8 +27,8 @@ void rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
EXPORTING_OPTIONS exporting_options, PROMETHEUS_OUTPUT_OPTIONS output_options);
int can_send_rrdset(struct instance *instance, RRDSET *st, SIMPLE_PATTERN *filter);
-size_t prometheus_name_copy(char *d, const char *s, size_t usable);
-size_t prometheus_label_copy(char *d, const char *s, size_t usable);
+void prometheus_name_copy(char *d, const char *s, size_t size);
+void prometheus_label_copy(char *d, const char *s, size_t size);
char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits);
void format_host_labels_prometheus(struct instance *instance, RRDHOST *host);
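The header now exposes `prometheus_name_copy()` and `prometheus_label_copy()` as `void` functions that take the destination size rather than a character count. A rough sketch of what that looks like from the caller's side (the body below is illustrative only, not Netdata's `prometheus_rrdlabels_sanitize_name()`): the bound travels with the buffer via `sizeof`, non-alphanumeric characters are mapped to `_`, and the result is always NUL-terminated.

```c
#include <ctype.h>
#include <stddef.h>

/* Illustrative bounded, sanitizing copy in the spirit of the new prometheus_name_copy(). */
static void name_copy_sketch(char *dst, const char *src, size_t size) {
    if (!size)
        return;
    size_t i = 0;
    for (; src[i] && i < size - 1; i++)
        dst[i] = isalnum((unsigned char)src[i]) ? src[i] : '_';
    dst[i] = '\0';
}

/* Callers pass sizeof(dst), mirroring the sizeof(k)/sizeof(v) changes in the hunks above. */
void example_caller(const char *name) {
    char k[64 + 1];                      /* stand-in for PROMETHEUS_ELEMENT_MAX + 1 */
    name_copy_sketch(k, name, sizeof(k));
    (void)k;
}
```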
diff --git a/src/exporting/prometheus/remote_write/remote_write.c b/src/exporting/prometheus/remote_write/remote_write.c
index b4b6f996b..fc8c5b3ba 100644
--- a/src/exporting/prometheus/remote_write/remote_write.c
+++ b/src/exporting/prometheus/remote_write/remote_write.c
@@ -114,12 +114,10 @@ int init_prometheus_remote_write_instance(struct instance *instance)
struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
instance->connector_specific_data = simple_connector_data;
-#ifdef ENABLE_HTTPS
simple_connector_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
-#endif
struct prometheus_remote_write_specific_data *connector_specific_data =
callocz(1, sizeof(struct prometheus_remote_write_specific_data));
@@ -147,8 +145,8 @@ static int format_remote_write_label_callback(const char *name, const char *valu
char k[PROMETHEUS_ELEMENT_MAX + 1];
char v[PROMETHEUS_ELEMENT_MAX + 1];
- prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX);
- prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX);
+ prometheus_name_copy(k, name, sizeof(k));
+ prometheus_label_copy(v, value, sizeof(v));
add_label(d->write_request, k, v);
return 1;
}
@@ -171,7 +169,7 @@ int format_host_prometheus_remote_write(struct instance *instance, RRDHOST *host
prometheus_label_copy(
hostname,
(host == localhost) ? instance->config.hostname : rrdhost_hostname(host),
- PROMETHEUS_ELEMENT_MAX);
+ sizeof(hostname));
add_host_info(
connector_specific_data->write_request,
@@ -200,9 +198,9 @@ int format_chart_prometheus_remote_write(struct instance *instance, RRDSET *st)
prometheus_label_copy(
chart,
(instance->config.options & EXPORTING_OPTION_SEND_NAMES && st->name) ? rrdset_name(st) : rrdset_id(st),
- PROMETHEUS_ELEMENT_MAX);
- prometheus_label_copy(family, rrdset_family(st), PROMETHEUS_ELEMENT_MAX);
- prometheus_name_copy(context, rrdset_context(st), PROMETHEUS_ELEMENT_MAX);
+ sizeof(chart));
+ prometheus_label_copy(family, rrdset_family(st), sizeof(family));
+ prometheus_name_copy(context, rrdset_context(st), sizeof(context));
as_collected = (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED);
homogeneous = 1;
@@ -268,7 +266,7 @@ int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *
prometheus_label_copy(
dimension,
(instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
- PROMETHEUS_ELEMENT_MAX);
+ sizeof(dimension));
snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
add_metric(
@@ -283,9 +281,9 @@ int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *
prometheus_name_copy(
dimension,
(instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
- PROMETHEUS_ELEMENT_MAX);
+ sizeof(dimension));
snprintf(
- name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", instance->config.prefix, context, dimension,
+ name, sizeof(name), "%s_%s_%s%s", instance->config.prefix, context, dimension,
suffix);
add_metric(
@@ -309,7 +307,7 @@ int format_dimension_prometheus_remote_write(struct instance *instance, RRDDIM *
prometheus_label_copy(
dimension,
(instance->config.options & EXPORTING_OPTION_SEND_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
- PROMETHEUS_ELEMENT_MAX);
+ sizeof(dimension));
snprintf(
name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", instance->config.prefix, context, units, suffix);
@@ -340,8 +338,8 @@ static int format_variable_prometheus_remote_write_callback(const DICTIONARY_ITE
char name[PROMETHEUS_LABELS_MAX + 1];
char *suffix = "";
- prometheus_name_copy(context, rrdvar_name(rv), PROMETHEUS_ELEMENT_MAX);
- snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", instance->config.prefix, context, suffix);
+ prometheus_name_copy(context, rrdvar_name(rv), sizeof(context));
+ snprintf(name, sizeof(name), "%s_%s%s", instance->config.prefix, context, suffix);
NETDATA_DOUBLE value = rrdvar2number(rv);
add_variable(connector_specific_data->write_request, name,
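Most of the remote_write.c hunks are the same mechanical substitution: a hard-coded `PROMETHEUS_ELEMENT_MAX`/`PROMETHEUS_LABELS_MAX` bound is replaced by `sizeof()` of the actual destination. A tiny hedged sketch of why that is preferable (buffer size and names below are illustrative):

```c
#include <stdio.h>

#define LABELS_MAX 1024                  /* illustrative stand-in for PROMETHEUS_LABELS_MAX */

void build_metric_name(const char *prefix, const char *context, const char *suffix) {
    char name[LABELS_MAX + 1];
    /* The bound is derived from the buffer itself, so it cannot drift out of
     * sync if the array's size is ever changed. */
    snprintf(name, sizeof(name), "%s_%s%s", prefix, context, suffix);
    puts(name);
}
```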
diff --git a/src/exporting/pubsub/integrations/google_cloud_pub_sub.md b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md
index 1adfd408e..f51c90af8 100644
--- a/src/exporting/pubsub/integrations/google_cloud_pub_sub.md
+++ b/src/exporting/pubsub/integrations/google_cloud_pub_sub.md
@@ -38,8 +38,8 @@ Export metrics to Google Cloud Pub/Sub Service
The configuration file name for this integration is `exporting.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
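# Aside, not part of the diffed hunk above: in the published integration docs this
# bash snippet continues by opening the file named earlier with Netdata's helper script:
sudo ./edit-config exporting.conf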
diff --git a/src/exporting/read_config.c b/src/exporting/read_config.c
index cd8af6bf6..13fe10883 100644
--- a/src/exporting/read_config.c
+++ b/src/exporting/read_config.c
@@ -5,11 +5,7 @@
EXPORTING_OPTIONS global_exporting_options = EXPORTING_SOURCE_DATA_AVERAGE | EXPORTING_OPTION_SEND_NAMES;
const char *global_exporting_prefix = "netdata";
-struct config exporting_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config exporting_config = APPCONFIG_INITIALIZER;
struct instance *prometheus_exporter_instance = NULL;
@@ -32,7 +28,7 @@ static _CONNECTOR_INSTANCE *find_instance(const char *section)
return local_ci;
}
-char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
+static const char *expconfig_get(struct config *root, const char *section, const char *name, const char *default_value)
{
_CONNECTOR_INSTANCE *local_ci;
@@ -207,14 +203,14 @@ struct engine *read_exporting_config()
if (unlikely(engine))
return engine;
- char *filename = strdupz_path_subpath(netdata_configured_user_config_dir, EXPORTING_CONF);
+ char *filename = filename_from_path_entry_strdupz(netdata_configured_user_config_dir, EXPORTING_CONF);
exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL);
if (!exporting_config_exists) {
netdata_log_info("CONFIG: cannot load user exporting config '%s'. Will try the stock version.", filename);
freez(filename);
- filename = strdupz_path_subpath(netdata_configured_stock_config_dir, EXPORTING_CONF);
+ filename = filename_from_path_entry_strdupz(netdata_configured_stock_config_dir, EXPORTING_CONF);
exporting_config_exists = appconfig_load(&exporting_config, filename, 0, NULL);
if (!exporting_config_exists)
netdata_log_info("CONFIG: cannot load stock exporting config '%s'. Running with internal defaults.", filename);
@@ -243,7 +239,7 @@ struct engine *read_exporting_config()
prometheus_exporter_instance->config.options |= global_exporting_options & EXPORTING_OPTIONS_SOURCE_BITS;
- char *data_source = prometheus_config_get("data source", "average");
+ const char *data_source = prometheus_config_get("data source", "average");
prometheus_exporter_instance->config.options =
exporting_parse_data_source(data_source, prometheus_exporter_instance->config.options);
@@ -378,7 +374,7 @@ struct engine *read_exporting_config()
tmp_instance->config.hosts_pattern = simple_pattern_create(
exporter_get(instance_name, "send hosts matching", "localhost *"), NULL, SIMPLE_PATTERN_EXACT, true);
- char *data_source = exporter_get(instance_name, "data source", "average");
+ const char *data_source = exporter_get(instance_name, "data source", "average");
tmp_instance->config.options = exporting_parse_data_source(data_source, tmp_instance->config.options);
if (EXPORTING_OPTIONS_DATA_SOURCE(tmp_instance->config.options) != EXPORTING_SOURCE_DATA_AS_COLLECTED &&
@@ -468,8 +464,6 @@ struct engine *read_exporting_config()
tmp_instance->config.hostname = strdupz(exporter_get(instance_name, "hostname", engine->config.hostname));
-#ifdef ENABLE_HTTPS
-
#define STR_GRAPHITE_HTTPS "graphite:https"
#define STR_JSON_HTTPS "json:https"
#define STR_OPENTSDB_HTTPS "opentsdb:https"
@@ -487,7 +481,6 @@ struct engine *read_exporting_config()
strlen(STR_PROMETHEUS_REMOTE_WRITE_HTTPS)))) {
tmp_instance->config.options |= EXPORTING_OPTION_USE_TLS;
}
-#endif
#ifdef NETDATA_INTERNAL_CHECKS
netdata_log_info(
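read_config.c above swaps a field-by-field static initializer for the `APPCONFIG_INITIALIZER` macro and tightens the config getters to return `const char *`. A generic sketch of the initializer-macro pattern (the struct, fields, and macro name here are made up for illustration, not Netdata's):

```c
#include <stddef.h>

struct my_config {
    void *first_section;
    void *last_section;
    int   flags;
};

/* One macro captures the default state, so every object of this type is
 * initialized identically and new fields only need updating in one place. */
#define MY_CONFIG_INITIALIZER { .first_section = NULL, .last_section = NULL, .flags = 0 }

static struct my_config exporting_cfg_example = MY_CONFIG_INITIALIZER;
```

The `const char *` change in the same file means callers such as `data_source` receive a read-only view of configuration strings instead of a pointer they could mutate.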
diff --git a/src/exporting/send_data.c b/src/exporting/send_data.c
index 097b7fd4b..02e9c7b29 100644
--- a/src/exporting/send_data.c
+++ b/src/exporting/send_data.c
@@ -2,7 +2,6 @@
#include "exporting_engine.h"
-#ifdef ENABLE_HTTPS
/**
* Check if TLS is enabled in the configuration
*
@@ -19,7 +18,6 @@ static int exporting_tls_is_enabled(EXPORTING_CONNECTOR_TYPE type __maybe_unused
type == EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE) &&
options & EXPORTING_OPTION_USE_TLS;
}
-#endif
/**
* Discard response
@@ -69,28 +67,23 @@ void simple_connector_receive_response(int *sock, struct instance *instance)
response = buffer_create(4096, &netdata_buffers_statistics.buffers_exporters);
struct stats *stats = &instance->stats;
-#ifdef ENABLE_HTTPS
uint32_t options = (uint32_t)instance->config.options;
struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
if (options & EXPORTING_OPTION_USE_TLS)
ERR_clear_error();
-#endif
errno_clear();
// loop through to collect all data
while (*sock != -1 && errno != EWOULDBLOCK) {
ssize_t r;
-#ifdef ENABLE_HTTPS
if (SSL_connection(&connector_specific_data->ssl))
r = netdata_ssl_read(&connector_specific_data->ssl, &response->buffer[response->len],
(int) (response->size - response->len));
else
r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
-#else
- r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
-#endif
+
if (likely(r > 0)) {
// we received some data
response->len += r;
@@ -136,13 +129,11 @@ void simple_connector_send_buffer(
flags += MSG_NOSIGNAL;
#endif
-#ifdef ENABLE_HTTPS
uint32_t options = (uint32_t)instance->config.options;
struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
if (options & EXPORTING_OPTION_USE_TLS)
ERR_clear_error();
-#endif
struct stats *stats = &instance->stats;
ssize_t header_sent_bytes = 0;
@@ -150,7 +141,6 @@ void simple_connector_send_buffer(
size_t header_len = buffer_strlen(header);
size_t buffer_len = buffer_strlen(buffer);
-#ifdef ENABLE_HTTPS
if (SSL_connection(&connector_specific_data->ssl)) {
if (header_len)
@@ -166,12 +156,6 @@ void simple_connector_send_buffer(
if ((size_t)header_sent_bytes == header_len)
buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags);
}
-#else
- if (header_len)
- header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags);
- if ((size_t)header_sent_bytes == header_len)
- buffer_sent_bytes = send(*sock, buffer_tostring(buffer), buffer_len, flags);
-#endif
if ((size_t)buffer_sent_bytes == buffer_len) {
// we sent the data successfully
@@ -221,12 +205,11 @@ void simple_connector_worker(void *instance_p)
snprintfz(threadname, ND_THREAD_TAG_MAX, "EXPSMPL[%zu]", instance->index);
uv_thread_set_name_np(threadname);
-#ifdef ENABLE_HTTPS
uint32_t options = (uint32_t)instance->config.options;
if (options & EXPORTING_OPTION_USE_TLS)
ERR_clear_error();
-#endif
+
struct simple_connector_config *connector_specific_config = instance->config.connector_specific_config;
int sock = -1;
@@ -303,7 +286,7 @@ void simple_connector_worker(void *instance_p)
&reconnects,
connector_specific_data->connected_to,
CONNECTED_TO_MAX);
-#ifdef ENABLE_HTTPS
+
if (exporting_tls_is_enabled(instance->config.type, options) && sock != -1) {
if (netdata_ssl_exporting_ctx) {
if (sock_delnonblock(sock) < 0)
@@ -326,7 +309,6 @@ void simple_connector_worker(void *instance_p)
}
}
}
-#endif
stats->reconnects += reconnects;
}
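The send_data.c hunks remove the `#ifdef ENABLE_HTTPS` compile-time split: there is now a single receive/send path, and the TLS decision is made per connection at runtime via the `SSL_connection(...)` checks already present in the code. A simplified sketch of that shape, with an illustrative flag and function pointer standing in for Netdata's SSL helpers:

```c
#include <stdbool.h>
#include <sys/socket.h>
#include <sys/types.h>

typedef ssize_t (*tls_read_fn)(void *ssl_conn, void *buf, size_t len);

/* One code path: read through the TLS layer when the connection negotiated
 * TLS, otherwise fall back to a plain non-blocking recv(). */
ssize_t connector_read(int sock, void *ssl_conn, bool tls_active,
                       tls_read_fn tls_read, void *buf, size_t len) {
    if (tls_active && ssl_conn)
        return tls_read(ssl_conn, buf, len);
    return recv(sock, buf, len, MSG_DONTWAIT);
}
```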