author    Daniel Baumann <daniel.baumann@progress-linux.org>  2021-12-01 06:15:11 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2021-12-01 06:15:11 +0000
commit    483926a283e118590da3f9ecfa75a8a4d62143ce
tree      cb77052778df9a128a8cd3ff5bf7645322a13bc5
parent    Releasing debian version 1.31.0-4.
Merging upstream version 1.32.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'exporting')
-rw-r--r--  exporting/README.md                              |  2
-rw-r--r--  exporting/WALKTHROUGH.md                         | 12
-rw-r--r--  exporting/check_filters.c                        |  2
-rw-r--r--  exporting/clean_connectors.c                     |  4
-rw-r--r--  exporting/exporting.conf                         |  5
-rw-r--r--  exporting/exporting_engine.c                     | 16
-rw-r--r--  exporting/exporting_engine.h                     |  4
-rw-r--r--  exporting/graphite/README.md                     |  7
-rw-r--r--  exporting/graphite/graphite.c                    |  2
-rw-r--r--  exporting/init_connectors.c                      | 73
-rw-r--r--  exporting/json/README.md                         |  7
-rw-r--r--  exporting/json/json.c                            |  2
-rw-r--r--  exporting/mongodb/mongodb.c                      |  2
-rw-r--r--  exporting/opentsdb/README.md                     |  7
-rw-r--r--  exporting/opentsdb/opentsdb.c                    |  2
-rw-r--r--  exporting/process_data.c                         |  2
-rw-r--r--  exporting/prometheus/README.md                   | 70
-rw-r--r--  exporting/prometheus/prometheus.c                |  8
-rw-r--r--  exporting/prometheus/remote_write/README.md      |  7
-rw-r--r--  exporting/prometheus/remote_write/remote_write.c |  2
-rw-r--r--  exporting/read_config.c                          |  4
-rw-r--r--  exporting/tests/exporting_doubles.c              |  2
-rw-r--r--  exporting/tests/exporting_fixtures.c             |  2
-rw-r--r--  exporting/tests/test_exporting_engine.c          | 24
24 files changed, 207 insertions, 61 deletions
diff --git a/exporting/README.md b/exporting/README.md
index 933de0e0..ef485bb1 100644
--- a/exporting/README.md
+++ b/exporting/README.md
@@ -164,6 +164,8 @@ You can configure each connector individually using the available [options](#opt
[opentsdb:http:my_opentsdb_http_instance]
enabled = yes
destination = localhost:4242
+ username = my_username
+ password = my_password
[opentsdb:https:my_opentsdb_https_instance]
enabled = yes
diff --git a/exporting/WALKTHROUGH.md b/exporting/WALKTHROUGH.md
index ac171291..24afd209 100644
--- a/exporting/WALKTHROUGH.md
+++ b/exporting/WALKTHROUGH.md
@@ -178,14 +178,14 @@ Prometheus's homepage and begin to type `netdata\_` Prometheus should auto compl
![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png)
-Let's now start exploring how we can graph some metrics. Back in our NetData container lets get the CPU spinning with a
+Let's now start exploring how we can graph some metrics. Back in our Netdata container lets get the CPU spinning with a
pointless busy loop. On the shell do the following:
```sh
[root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done
```
-Our NetData cpu graph should be showing some activity. Let's represent this in Prometheus. In order to do this let's
+Our Netdata cpu graph should be showing some activity. Let's represent this in Prometheus. In order to do this let's
keep our metrics page open for reference: <http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes>. We are
setting out to graph the data in the CPU chart so let's search for `system.cpu` in the metrics page above. We come
across a section of metrics with the first comments `# COMMENT homogeneous chart "system.cpu", context "system.cpu",
@@ -211,18 +211,18 @@ query the dimension also. Place this into our query text box.
![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.54.40%20PM.png)
-Awesome, this is exactly what we wanted. If you haven't caught on yet we can emulate entire charts from NetData by using
+Awesome, this is exactly what we wanted. If you haven't caught on yet we can emulate entire charts from Netdata by using
the `chart` dimension. If you'd like you can combine the `chart` and `instance` dimension to create per-instance charts.
Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
-This is the basics of using Prometheus to query NetData. I'd advise everyone at this point to read [this
-page](/exporting/prometheus/#using-netdata-with-prometheus). The key point here is that NetData can export metrics from
+This is the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
+page](/exporting/prometheus/#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter like so.
<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected> If you choose to use
this method you will need to use Prometheus's set of functions here: <https://prometheus.io/docs/querying/functions/> to
obtain useful metrics as you are now dealing with raw counters from the system. For example you will have to use the
`irate()` function over a counter to get that metric's rate per second. If your graphing needs are met by using the
-metrics returned by NetData's internal database (not specifying any source= URL parameter) then use that. If you find
+metrics returned by Netdata's internal database (not specifying any source= URL parameter) then use that. If you find
limitations then consider re-writing your queries using the raw data and using Prometheus functions to get the desired
chart.
diff --git a/exporting/check_filters.c b/exporting/check_filters.c
index 64ced723..d2d7d870 100644
--- a/exporting/check_filters.c
+++ b/exporting/check_filters.c
@@ -43,7 +43,9 @@ int rrdhost_is_exportable(struct instance *instance, RRDHOST *host)
*/
int rrdset_is_exportable(struct instance *instance, RRDSET *st)
{
+#ifdef NETDATA_INTERNAL_CHECKS
RRDHOST *host = st->rrdhost;
+#endif
if (st->exporting_flags == NULL)
st->exporting_flags = callocz(instance->engine->instance_num, sizeof(size_t));
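This guard (repeated for `host` in `process_data.c` and `prometheus.c`, and for `connector_specific_config` in `mongodb.c` below) exists because the variable is only referenced by debug logging that compiles away in release builds, where the bare declaration would trigger an unused-variable warning. A minimal self-contained sketch of the pattern, assuming a `debug()` macro that merely stands in for Netdata's internal-checks logging:

```c
#include <stdio.h>

#ifdef NETDATA_INTERNAL_CHECKS
/* Debug builds: the macro expands to a real call, so `host` is used. */
#define debug(fmt, ...) fprintf(stderr, fmt "\n", __VA_ARGS__)
#else
/* Release builds: the arguments are dropped at preprocessing time, so a
 * variable that appears only inside debug() is never referenced. */
#define debug(fmt, ...) do { } while (0)
#endif

struct rrdhost { const char *hostname; };
struct rrdset  { struct rrdhost *rrdhost; };

int rrdset_is_exportable_sketch(struct rrdset *st)
{
#ifdef NETDATA_INTERNAL_CHECKS
    struct rrdhost *host = st->rrdhost; /* only needed by debug() below */
#endif
    debug("checking chart on host '%s'", host->hostname);
    return 1;
}

int main(void)
{
    struct rrdhost h = { "localhost" };
    struct rrdset  s = { &h };
    return !rrdset_is_exportable_sketch(&s);
}
```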
diff --git a/exporting/clean_connectors.c b/exporting/clean_connectors.c
index 890e8daa..4af1219a 100644
--- a/exporting/clean_connectors.c
+++ b/exporting/clean_connectors.c
@@ -15,6 +15,8 @@ static void clean_instance_config(struct instance_config *config)
freez((void *)config->type_name);
freez((void *)config->name);
freez((void *)config->destination);
+ freez((void *)config->username);
+ freez((void *)config->password);
freez((void *)config->prefix);
freez((void *)config->hostname);
@@ -49,6 +51,8 @@ void simple_connector_cleanup(struct instance *instance)
struct simple_connector_data *simple_connector_data =
(struct simple_connector_data *)instance->connector_specific_data;
+ freez(simple_connector_data->auth_string);
+
buffer_free(instance->buffer);
buffer_free(simple_connector_data->buffer);
buffer_free(simple_connector_data->header);
diff --git a/exporting/exporting.conf b/exporting/exporting.conf
index c2e902c0..314e1541 100644
--- a/exporting/exporting.conf
+++ b/exporting/exporting.conf
@@ -17,6 +17,9 @@
# [graphite:my_graphite_instance]
# enabled = no
# destination = localhost
+ # Credentials for basic HTTP authentication
+ # username = my_username
+ # password = my_password
# data source = average
# prefix = netdata
# hostname = my_hostname
@@ -31,6 +34,8 @@
# enabled = no
# destination = localhost
# remote write URL path = /receive
+ # username = my_username
+ # password = my_password
# data source = average
# prefix = netdata
# hostname = my_hostname
diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c
index 70aceea8..faace86d 100644
--- a/exporting/exporting_engine.c
+++ b/exporting/exporting_engine.c
@@ -4,6 +4,22 @@
static struct engine *engine = NULL;
+void analytics_exporting_connectors_ssl(BUFFER *b)
+{
+#ifdef ENABLE_HTTPS
+ if (netdata_exporting_ctx) {
+ for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
+ struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
+ if (connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
+ buffer_strcat(b, "exporting");
+ break;
+ }
+ }
+ }
+#endif
+ buffer_strcat(b, "|");
+}
+
void analytics_exporting_connectors(BUFFER *b)
{
if (!engine)
diff --git a/exporting/exporting_engine.h b/exporting/exporting_engine.h
index 1ad6e685..f08583fb 100644
--- a/exporting/exporting_engine.h
+++ b/exporting/exporting_engine.h
@@ -66,6 +66,8 @@ struct instance_config {
const char *name;
const char *destination;
+ const char *username;
+ const char *password;
const char *prefix;
const char *hostname;
@@ -104,6 +106,8 @@ struct simple_connector_data {
void *connector_specific_data;
char connected_to[CONNECTED_TO_MAX];
+
+ char *auth_string;
size_t total_buffered_metrics;
diff --git a/exporting/graphite/README.md b/exporting/graphite/README.md
index a6a25ef7..d755e093 100644
--- a/exporting/graphite/README.md
+++ b/exporting/graphite/README.md
@@ -22,7 +22,12 @@ directory and set the following options:
```
Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For example: `graphite:http:my_graphite_instance`,
-`graphite:https:my_graphite_instance`.
+`graphite:https:my_graphite_instance`. You can set basic HTTP authentication credentials using
+
+```conf
+ username = my_username
+ password = my_password
+```
The Graphite connector is further configurable using additional settings. See the [exporting reference
doc](/exporting/README.md#options) for details.
diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c
index 722db0ff..84d4febf 100644
--- a/exporting/graphite/graphite.c
+++ b/exporting/graphite/graphite.c
@@ -218,10 +218,12 @@ void graphite_http_prepare_header(struct instance *instance)
simple_connector_data->last_buffer->header,
"POST /api/put HTTP/1.1\r\n"
"Host: %s\r\n"
+ "%s"
"Content-Type: application/graphite\r\n"
"Content-Length: %lu\r\n"
"\r\n",
instance->config.destination,
+ simple_connector_data->auth_string ? simple_connector_data->auth_string : "",
buffer_strlen(simple_connector_data->last_buffer->buffer));
return;
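The same `"%s"` placeholder plus NULL-guarded ternary is applied to the JSON, OpenTSDB, and Prometheus remote-write headers below: when no credentials are configured, `auth_string` stays NULL and the placeholder collapses to an empty string, leaving the header exactly as before. A standalone sketch of the splice, with plain `snprintf` standing in for Netdata's buffer API and the host and length values made up:

```c
#include <stdio.h>

int main(void)
{
    /* With credentials configured this would be the pre-built header line
     * from simple_connector_init(); set it to NULL to see the placeholder
     * disappear. */
    const char *auth_string = "Authorization: Basic bXlfdXNlcm5hbWU6bXlfcGFzc3dvcmQ=\n";

    char header[512];
    snprintf(header, sizeof(header),
             "POST /api/put HTTP/1.1\r\n"
             "Host: %s\r\n"
             "%s"
             "Content-Type: application/graphite\r\n"
             "Content-Length: %lu\r\n"
             "\r\n",
             "localhost:8080",
             auth_string ? auth_string : "",
             123UL);
    fputs(header, stdout);
    return 0;
}
```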
diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c
index 6aff2635..69ea0685 100644
--- a/exporting/init_connectors.c
+++ b/exporting/init_connectors.c
@@ -92,7 +92,7 @@ int init_connectors(struct engine *engine)
// dispatch the instance worker thread
int error = uv_thread_create(&instance->thread, instance->worker, instance);
if (error) {
- error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
+ error("EXPORTING: cannot create thread worker. uv_thread_create(): %s", uv_strerror(error));
return 1;
}
char threadname[NETDATA_THREAD_NAME_MAX + 1];
@@ -105,8 +105,57 @@ int init_connectors(struct engine *engine)
return 0;
}
+// TODO: use a base64 encoder from a library
+static size_t base64_encode(unsigned char *input, size_t input_size, char *output, size_t output_size)
+{
+ uint32_t value;
+ static char lookup[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/";
+ if ((input_size / 3 + 1) * 4 >= output_size) {
+ error("Output buffer for encoding size=%zu is not large enough for %zu-bytes input", output_size, input_size);
+ return 0;
+ }
+ size_t count = 0;
+ while (input_size > 3) {
+ value = ((input[0] << 16) + (input[1] << 8) + input[2]) & 0xffffff;
+ output[0] = lookup[value >> 18];
+ output[1] = lookup[(value >> 12) & 0x3f];
+ output[2] = lookup[(value >> 6) & 0x3f];
+ output[3] = lookup[value & 0x3f];
+ //error("Base-64 encode (%04x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]);
+ output += 4;
+ input += 3;
+ input_size -= 3;
+ count += 4;
+ }
+ switch (input_size) {
+ case 2:
+ value = (input[0] << 10) + (input[1] << 2);
+ output[0] = lookup[(value >> 12) & 0x3f];
+ output[1] = lookup[(value >> 6) & 0x3f];
+ output[2] = lookup[value & 0x3f];
+ output[3] = '=';
+ //error("Base-64 encode (%06x) -> %c %c %c %c\n", (value>>2)&0xffff, output[0], output[1], output[2], output[3]);
+ count += 4;
+ break;
+ case 1:
+ value = input[0] << 4;
+ output[0] = lookup[(value >> 6) & 0x3f];
+ output[1] = lookup[value & 0x3f];
+ output[2] = '=';
+ output[3] = '=';
+ //error("Base-64 encode (%06x) -> %c %c %c %c\n", value, output[0], output[1], output[2], output[3]);
+ count += 4;
+ break;
+ case 0:
+ break;
+ }
+ return count;
+}
+
/**
- * Initialize a ring buffer for a simple connector
+ * Initialize a ring buffer and credentials for a simple connector
*
* @param instance an instance data structure.
*/
@@ -141,5 +190,25 @@ void simple_connector_init(struct instance *instance)
first_buffer->next = connector_specific_data->first_buffer;
connector_specific_data->last_buffer = connector_specific_data->first_buffer;
+ if (*instance->config.username || *instance->config.password) {
+ BUFFER *auth_string = buffer_create(0);
+
+ buffer_sprintf(auth_string, "%s:%s", instance->config.username, instance->config.password);
+
+ size_t encoded_size = (buffer_strlen(auth_string) / 3 + 1) * 4 + 1;
+ char *encoded_credentials = callocz(1, encoded_size);
+
+ base64_encode((unsigned char*)buffer_tostring(auth_string), buffer_strlen(auth_string), encoded_credentials, encoded_size);
+
+ buffer_flush(auth_string);
+ buffer_sprintf(auth_string, "Authorization: Basic %s\n", encoded_credentials);
+
+ freez(encoded_credentials);
+
+ connector_specific_data->auth_string = strdupz(buffer_tostring(auth_string));
+
+ buffer_free(auth_string);
+ }
+
return;
}
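Taken together, the new code base64-encodes `username:password` once per instance and caches it as `auth_string` for the header builders above and below. A self-contained sketch of that flow in plain C follows (no Netdata buffer API). Two details of the code as committed are worth flagging: the encoder's `while (input_size > 3)` loop hands a remainder of exactly 3 bytes to a `switch` that has no `case 3:`, so an input whose length is a multiple of 3 loses its final group — the sketch uses `>= 3` with a padded tail instead — and the generated header line ends in a bare `\n` rather than the `\r\n` used elsewhere in the headers.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Standalone re-implementation for illustration; same lookup-table scheme
 * as above, but the main loop consumes full 3-byte groups with `>= 3`. */
static size_t b64_encode(const unsigned char *in, size_t n, char *out, size_t out_size)
{
    static const char lookup[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                                 "abcdefghijklmnopqrstuvwxyz"
                                 "0123456789+/";
    if ((n / 3 + 1) * 4 >= out_size)
        return 0; /* output buffer too small */
    size_t count = 0;
    while (n >= 3) {
        uint32_t v = (uint32_t)(in[0] << 16) | (uint32_t)(in[1] << 8) | in[2];
        *out++ = lookup[v >> 18];
        *out++ = lookup[(v >> 12) & 0x3f];
        *out++ = lookup[(v >> 6) & 0x3f];
        *out++ = lookup[v & 0x3f];
        in += 3; n -= 3; count += 4;
    }
    if (n) { /* 1- or 2-byte tail, padded with '=' */
        uint32_t v = (uint32_t)(in[0] << 16) | (n == 2 ? (uint32_t)(in[1] << 8) : 0);
        *out++ = lookup[v >> 18];
        *out++ = lookup[(v >> 12) & 0x3f];
        *out++ = (n == 2) ? lookup[(v >> 6) & 0x3f] : '=';
        *out++ = '=';
        count += 4;
    }
    *out = '\0';
    return count;
}

int main(void)
{
    const char *username = "my_username", *password = "my_password";
    char plain[256], encoded[512], auth_string[600];

    snprintf(plain, sizeof(plain), "%s:%s", username, password);
    b64_encode((const unsigned char *)plain, strlen(plain), encoded, sizeof(encoded));
    /* Mirrors init_connectors.c: the line is terminated with a bare '\n'. */
    snprintf(auth_string, sizeof(auth_string), "Authorization: Basic %s\n", encoded);

    fputs(auth_string, stdout);
    return 0;
}
```

Running the sketch prints `Authorization: Basic bXlfdXNlcm5hbWU6bXlfcGFzc3dvcmQ=`, matching the credentials used in the documentation examples above.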
diff --git a/exporting/json/README.md b/exporting/json/README.md
index a0f8472a..7cce463e 100644
--- a/exporting/json/README.md
+++ b/exporting/json/README.md
@@ -22,7 +22,12 @@ directory and set the following options:
```
Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For example: `json:http:my_json_instance`,
-`json:https:my_json_instance`.
+`json:https:my_json_instance`. You can set basic HTTP authentication credentials using
+
+```conf
+ username = my_username
+ password = my_password
+```
The JSON connector is further configurable using additional settings. See the [exporting reference
doc](/exporting/README.md#options) for details.
diff --git a/exporting/json/json.c b/exporting/json/json.c
index f2396baf..50278c5b 100644
--- a/exporting/json/json.c
+++ b/exporting/json/json.c
@@ -352,10 +352,12 @@ void json_http_prepare_header(struct instance *instance)
simple_connector_data->last_buffer->header,
"POST /api/put HTTP/1.1\r\n"
"Host: %s\r\n"
+ "%s"
"Content-Type: application/json\r\n"
"Content-Length: %lu\r\n"
"\r\n",
instance->config.destination,
+ simple_connector_data->auth_string ? simple_connector_data->auth_string : "",
buffer_strlen(simple_connector_data->last_buffer->buffer));
return;
diff --git a/exporting/mongodb/mongodb.c b/exporting/mongodb/mongodb.c
index 44922a24..49ce9526 100644
--- a/exporting/mongodb/mongodb.c
+++ b/exporting/mongodb/mongodb.c
@@ -276,7 +276,9 @@ void mongodb_cleanup(struct instance *instance)
void mongodb_connector_worker(void *instance_p)
{
struct instance *instance = (struct instance *)instance_p;
+#ifdef NETDATA_INTERNAL_CHECKS
struct mongodb_specific_config *connector_specific_config = instance->config.connector_specific_config;
+#endif
struct mongodb_specific_data *connector_specific_data =
(struct mongodb_specific_data *)instance->connector_specific_data;
diff --git a/exporting/opentsdb/README.md b/exporting/opentsdb/README.md
index 3765ad27..0ca6d244 100644
--- a/exporting/opentsdb/README.md
+++ b/exporting/opentsdb/README.md
@@ -22,7 +22,12 @@ directory and set the following options:
```
Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For example: `opentsdb:http:my_opentsdb_instance`,
-`opentsdb:https:my_opentsdb_instance`.
+`opentsdb:https:my_opentsdb_instance`. You can set basic HTTP authentication credentials using
+
+```conf
+ username = my_username
+ password = my_password
+```
The OpenTSDB connector is further configurable using additional settings. See the [exporting reference
doc](/exporting/README.md#options) for details.
diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c
index 1310c150..7ed88fd6 100644
--- a/exporting/opentsdb/opentsdb.c
+++ b/exporting/opentsdb/opentsdb.c
@@ -269,10 +269,12 @@ void opentsdb_http_prepare_header(struct instance *instance)
simple_connector_data->last_buffer->header,
"POST /api/put HTTP/1.1\r\n"
"Host: %s\r\n"
+ "%s"
"Content-Type: application/json\r\n"
"Content-Length: %lu\r\n"
"\r\n",
instance->config.destination,
+ simple_connector_data->auth_string ? simple_connector_data->auth_string : "",
buffer_strlen(simple_connector_data->last_buffer->buffer));
return;
diff --git a/exporting/process_data.c b/exporting/process_data.c
index 5e11b394..2c0c2d17 100644
--- a/exporting/process_data.c
+++ b/exporting/process_data.c
@@ -70,7 +70,9 @@ calculated_number exporting_calculate_value_from_stored_data(
time_t *last_timestamp)
{
RRDSET *st = rd->rrdset;
+#ifdef NETDATA_INTERNAL_CHECKS
RRDHOST *host = st->rrdhost;
+#endif
time_t after = instance->after;
time_t before = instance->before;
diff --git a/exporting/prometheus/README.md b/exporting/prometheus/README.md
index d718a366..ef6f6135 100644
--- a/exporting/prometheus/README.md
+++ b/exporting/prometheus/README.md
@@ -128,46 +128,46 @@ scrape_configs:
#### Install nodes.yml
-The following is completely optional, it will enable Prometheus to generate alerts from some NetData sources. Tweak the
+The following is completely optional, it will enable Prometheus to generate alerts from some Netdata sources. Tweak the
values to your own needs. We will use the following `nodes.yml` file below. Save it at `/opt/prometheus/nodes.yml`, and
add a _- "nodes.yml"_ entry under the _rule_files:_ section in the example prometheus.yml file above.
```yaml
groups:
-- name: nodes
-
- rules:
- - alert: node_high_cpu_usage_70
- expr: avg(rate(netdata_cpu_cpu_percentage_average{dimension="idle"}[1m])) by (job) > 70
- for: 1m
- annotations:
- description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
- summary: CPU alert for container node '{{ $labels.job }}'
-
- - alert: node_high_memory_usage_70
- expr: 100 / sum(netdata_system_ram_MB_average) by (job)
- * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
- for: 1m
- annotations:
- description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
- summary: Memory alert for container node '{{ $labels.job }}'
-
- - alert: node_low_root_filesystem_space_20
- expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
- * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
- for: 1m
- annotations:
- description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
- summary: Root filesystem alert for container node '{{ $labels.job }}'
-
- - alert: node_root_filesystem_fill_rate_6h
- expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
- for: 1h
- labels:
- severity: critical
- annotations:
- description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
- summary: Disk fill alert for Swarm node '{{ $labels.job }}'
+ - name: nodes
+
+ rules:
+ - alert: node_high_cpu_usage_70
+ expr: sum(sum_over_time(netdata_system_cpu_percentage_average{dimension=~"(user|system|softirq|irq|guest)"}[10m])) by (job) / sum(count_over_time(netdata_system_cpu_percentage_average{dimension="idle"}[10m])) by (job) > 70
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} on ''{{ $labels.job }}'' CPU usage is at {{ humanize $value }}%.'
+ summary: CPU alert for container node '{{ $labels.job }}'
+
+ - alert: node_high_memory_usage_70
+ expr: 100 / sum(netdata_system_ram_MB_average) by (job)
+ * sum(netdata_system_ram_MB_average{dimension=~"free|cached"}) by (job) < 30
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} memory usage is {{ humanize $value}}%.'
+ summary: Memory alert for container node '{{ $labels.job }}'
+
+ - alert: node_low_root_filesystem_space_20
+ expr: 100 / sum(netdata_disk_space_GB_average{family="/"}) by (job)
+ * sum(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}) by (job) < 20
+ for: 1m
+ annotations:
+ description: '{{ $labels.job }} root filesystem space is {{ humanize $value}}%.'
+ summary: Root filesystem alert for container node '{{ $labels.job }}'
+
+ - alert: node_root_filesystem_fill_rate_6h
+ expr: predict_linear(netdata_disk_space_GB_average{family="/",dimension=~"avail|cached"}[1h], 6 * 3600) < 0
+ for: 1h
+ labels:
+ severity: critical
+ annotations:
+ description: Container node {{ $labels.job }} root filesystem is going to fill up in 6h.
+ summary: Disk fill alert for Swarm node '{{ $labels.job }}'
```
#### Install prometheus.service
diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c
index 6759313c..0a319007 100644
--- a/exporting/prometheus/prometheus.c
+++ b/exporting/prometheus/prometheus.c
@@ -16,7 +16,9 @@
*/
inline int can_send_rrdset(struct instance *instance, RRDSET *st)
{
+#ifdef NETDATA_INTERNAL_CHECKS
RRDHOST *host = st->rrdhost;
+#endif
if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_EXPORTING_IGNORE)))
return 0;
@@ -136,7 +138,7 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
* Copy and sanitize name.
*
* @param d a destination string.
- * @param s a source sting.
+ * @param s a source string.
* @param usable the number of characters to copy.
* @return Returns the length of the copied string.
*/
@@ -161,7 +163,7 @@ inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
* Copy and sanitize label.
*
* @param d a destination string.
- * @param s a source sting.
+ * @param s a source string.
* @param usable the number of characters to copy.
* @return Returns the length of the copied string.
*/
@@ -190,7 +192,7 @@ inline size_t prometheus_label_copy(char *d, const char *s, size_t usable)
* Copy and sanitize units.
*
* @param d a destination string.
- * @param s a source sting.
+ * @param s a source string.
* @param usable the number of characters to copy.
* @param showoldunits set this flag to 1 to show old (before v1.12) units.
* @return Returns the destination string.
diff --git a/exporting/prometheus/remote_write/README.md b/exporting/prometheus/remote_write/README.md
index fe901024..ce379063 100644
--- a/exporting/prometheus/remote_write/README.md
+++ b/exporting/prometheus/remote_write/README.md
@@ -41,6 +41,13 @@ For example, if your endpoint is `http://example.domain:example_port/storage/rea
remote write URL path = /storage/read
```
+You can set basic HTTP authentication credentials using
+
+```conf
+ username = my_username
+ password = my_password
+```
+
`buffered` and `lost` dimensions in the Netdata Exporting Connector Data Size operation monitoring chart estimate uncompressed
buffer size on failures.
diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c
index 986ad9f0..8339712e 100644
--- a/exporting/prometheus/remote_write/remote_write.c
+++ b/exporting/prometheus/remote_write/remote_write.c
@@ -25,6 +25,7 @@ void prometheus_remote_write_prepare_header(struct instance *instance)
"POST %s HTTP/1.1\r\n"
"Host: %s\r\n"
"Accept: */*\r\n"
+ "%s"
"Content-Encoding: snappy\r\n"
"Content-Type: application/x-protobuf\r\n"
"X-Prometheus-Remote-Write-Version: 0.1.0\r\n"
@@ -32,6 +33,7 @@ void prometheus_remote_write_prepare_header(struct instance *instance)
"\r\n",
connector_specific_config->remote_write_path,
simple_connector_data->connected_to,
+ simple_connector_data->auth_string ? simple_connector_data->auth_string : "",
buffer_strlen(simple_connector_data->last_buffer->buffer));
}
diff --git a/exporting/read_config.c b/exporting/read_config.c
index ea50fa0f..77687d84 100644
--- a/exporting/read_config.c
+++ b/exporting/read_config.c
@@ -456,6 +456,10 @@ struct engine *read_exporting_config()
tmp_instance->config.destination = strdupz(exporter_get(instance_name, "destination", default_destination));
+ tmp_instance->config.username = strdupz(exporter_get(instance_name, "username", ""));
+
+ tmp_instance->config.password = strdupz(exporter_get(instance_name, "password", ""));
+
tmp_instance->config.prefix = strdupz(exporter_get(instance_name, "prefix", "netdata"));
tmp_instance->config.hostname = strdupz(exporter_get(instance_name, "hostname", engine->config.hostname));
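Both new options default to the empty string, which is what makes the `if (*instance->config.username || *instance->config.password)` gate in `simple_connector_init()` work: with nothing configured, both strings are empty and no `Authorization` header is ever built. A tiny sketch of the gate, with literals standing in for the values Netdata reads via `exporter_get()`:

```c
#include <stdio.h>

int main(void)
{
    /* Stand-ins for exporter_get(instance_name, "username"/"password", "") */
    const char *username = "";
    const char *password = "";

    /* First-byte test: true as soon as either option is set at all. */
    if (*username || *password)
        puts("credentials set: simple_connector_init() would build auth_string");
    else
        puts("no credentials: auth_string stays NULL, header line omitted");
    return 0;
}
```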
diff --git a/exporting/tests/exporting_doubles.c b/exporting/tests/exporting_doubles.c
index 3c73e032..b8c9f375 100644
--- a/exporting/tests/exporting_doubles.c
+++ b/exporting/tests/exporting_doubles.c
@@ -22,6 +22,8 @@ struct engine *__mock_read_exporting_config()
instance->config.type = EXPORTING_CONNECTOR_TYPE_GRAPHITE;
instance->config.name = strdupz("instance_name");
instance->config.destination = strdupz("localhost");
+ instance->config.username = strdupz("");
+ instance->config.password = strdupz("");
instance->config.prefix = strdupz("netdata");
instance->config.hostname = strdupz("test-host");
instance->config.update_every = 1;
diff --git a/exporting/tests/exporting_fixtures.c b/exporting/tests/exporting_fixtures.c
index b5b0ce81..b632761e 100644
--- a/exporting/tests/exporting_fixtures.c
+++ b/exporting/tests/exporting_fixtures.c
@@ -18,6 +18,8 @@ int teardown_configured_engine(void **state)
struct instance *instance = engine->instance_root;
free((void *)instance->config.destination);
+ free((void *)instance->config.username);
+ free((void *)instance->config.password);
free((void *)instance->config.name);
free((void *)instance->config.prefix);
free((void *)instance->config.hostname);
diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c
index 73fd3ca6..7188c6ee 100644
--- a/exporting/tests/test_exporting_engine.c
+++ b/exporting/tests/test_exporting_engine.c
@@ -312,12 +312,12 @@ static void test_exporting_calculate_value_from_stored_data(void **state)
expect_function_call(__mock_rrddim_query_is_finished);
will_return(__mock_rrddim_query_is_finished, 0);
expect_function_call(__mock_rrddim_query_next_metric);
- will_return(__mock_rrddim_query_next_metric, pack_storage_number(27, SN_EXISTS));
+ will_return(__mock_rrddim_query_next_metric, pack_storage_number(27, SN_DEFAULT_FLAGS));
expect_function_call(__mock_rrddim_query_is_finished);
will_return(__mock_rrddim_query_is_finished, 0);
expect_function_call(__mock_rrddim_query_next_metric);
- will_return(__mock_rrddim_query_next_metric, pack_storage_number(45, SN_EXISTS));
+ will_return(__mock_rrddim_query_next_metric, pack_storage_number(45, SN_DEFAULT_FLAGS));
expect_function_call(__mock_rrddim_query_is_finished);
will_return(__mock_rrddim_query_is_finished, 1);
@@ -431,7 +431,7 @@ static void test_format_dimension_stored_graphite_plaintext(void **state)
struct engine *engine = *state;
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
RRDDIM *rd = localhost->rrdset_root->dimensions;
assert_int_equal(format_dimension_stored_graphite_plaintext(engine->instance_root, rd), 0);
@@ -459,7 +459,7 @@ static void test_format_dimension_stored_json_plaintext(void **state)
struct engine *engine = *state;
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
RRDDIM *rd = localhost->rrdset_root->dimensions;
assert_int_equal(format_dimension_stored_json_plaintext(engine->instance_root, rd), 0);
@@ -487,7 +487,7 @@ static void test_format_dimension_stored_opentsdb_telnet(void **state)
struct engine *engine = *state;
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
RRDDIM *rd = localhost->rrdset_root->dimensions;
assert_int_equal(format_dimension_stored_opentsdb_telnet(engine->instance_root, rd), 0);
@@ -515,7 +515,7 @@ static void test_format_dimension_stored_opentsdb_http(void **state)
struct engine *engine = *state;
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
RRDDIM *rd = localhost->rrdset_root->dimensions;
assert_int_equal(format_dimension_stored_opentsdb_http(engine->instance_root, rd), 0);
@@ -1053,7 +1053,7 @@ static void test_format_host_labels_prometheus(void **state)
instance->config.options |= EXPORTING_OPTION_SEND_AUTOMATIC_LABELS;
format_host_labels_prometheus(instance, localhost);
- assert_string_equal(buffer_tostring(instance->labels), "key1=\"netdata\",key2=\"value2\"");
+ assert_string_equal(buffer_tostring(instance->labels), "key1=\"value1\",key2=\"value2\"");
}
static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
@@ -1070,7 +1070,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
will_return(__wrap_now_realtime_sec, 2);
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(localhost, buffer, "test_server", "test_prefix", 0, 0);
@@ -1087,7 +1087,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
will_return(__wrap_now_realtime_sec, 2);
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
localhost, buffer, "test_server", "test_prefix", 0, PROMETHEUS_OUTPUT_NAMES | PROMETHEUS_OUTPUT_TYPES);
@@ -1106,7 +1106,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
will_return(__wrap_now_realtime_sec, 2);
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(localhost, buffer, "test_server", "test_prefix", 0, 0);
@@ -1265,7 +1265,7 @@ static void test_format_dimension_prometheus_remote_write(void **state)
RRDDIM *rd = localhost->rrdset_root->dimensions;
expect_function_call(__wrap_exporting_calculate_value_from_stored_data);
- will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_EXISTS));
+ will_return(__wrap_exporting_calculate_value_from_stored_data, pack_storage_number(27, SN_DEFAULT_FLAGS));
expect_function_call(__wrap_add_metric);
expect_value(__wrap_add_metric, write_request_p, 0xff);
@@ -1877,7 +1877,7 @@ int main(void)
cmocka_unit_test_setup_teardown(test_prometheus_label_copy, setup_prometheus, teardown_prometheus),
cmocka_unit_test_setup_teardown(test_prometheus_units_copy, setup_prometheus, teardown_prometheus),
cmocka_unit_test_setup_teardown(
- test_format_host_labels_prometheus, setup_configured_engine, teardown_configured_engine),
+ test_format_host_labels_prometheus, setup_initialized_engine, teardown_initialized_engine),
cmocka_unit_test_setup_teardown(
rrd_stats_api_v1_charts_allmetrics_prometheus, setup_prometheus, teardown_prometheus),
};