author    Daniel Baumann <daniel.baumann@progress-linux.org> 2022-01-26 18:05:10 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2022-01-26 18:05:10 +0000
commit    34a0b66bc2d48223748ed1cf5bc1b305c396bd74 (patch)
tree      fbd36be86cc6bc4288fe627f2b5beada569848bb /exporting
parent    Adding upstream version 1.32.1. (diff)
Adding upstream version 1.33.0. (upstream/1.33.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'exporting')
-rw-r--r--  exporting/README.md                                        |  6
-rw-r--r--  exporting/WALKTHROUGH.md                                   | 12
-rw-r--r--  exporting/prometheus/remote_write/remote_write_request.cc  | 40
-rw-r--r--  exporting/prometheus/remote_write/remote_write_request.h   |  6
-rw-r--r--  exporting/pubsub/README.md                                 |  9
-rw-r--r--  exporting/pubsub/pubsub_publish.cc                         |  2
-rw-r--r--  exporting/tests/test_exporting_engine.c                    | 78
7 files changed, 118 insertions(+), 35 deletions(-)
diff --git a/exporting/README.md b/exporting/README.md
index ef485bb18..18f56fbb6 100644
--- a/exporting/README.md
+++ b/exporting/README.md
@@ -1,11 +1,11 @@
<!--
-title: "Exporting engine reference"
+title: "Exporting reference"
description: "With the exporting engine, you can archive your Netdata metrics to multiple external databases for long-term storage or further analysis."
-sidebar_label: Reference guide
+sidebar_label: Exporting reference
custom_edit_url: https://github.com/netdata/netdata/edit/master/exporting/README.md
-->
-# Exporting engine reference
+# Exporting reference
Welcome to the exporting engine reference guide. This guide contains comprehensive information about enabling,
configuring, and monitoring Netdata's exporting engine, which allows you to send metrics to external time-series
diff --git a/exporting/WALKTHROUGH.md b/exporting/WALKTHROUGH.md
index 24afd2097..c17ee1650 100644
--- a/exporting/WALKTHROUGH.md
+++ b/exporting/WALKTHROUGH.md
@@ -99,7 +99,7 @@ reading to migrate this tutorial to a VM or Server of any sort.
Let's start another container in the same fashion as we did the Netdata container.
```sh
-docker run -it --name prometheus --hostname prometheus
+docker run -it --name prometheus --hostname prometheus \
--network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
```
@@ -110,6 +110,12 @@ files later in this tutorial.
yum install vim -y
```
+You will also need `wget` and `curl` to download files, and `sudo` if you are not root.
+
+```sh
+yum install curl sudo wget -y
+```
+
Prometheus provides a tarball of their latest stable versions [here](https://prometheus.io/download/).
Let's download the latest version and install it into your container.
@@ -129,7 +135,7 @@ This should get Prometheus installed into the container. Let's test that we can
interface.
```sh
-/opt/prometheus/prometheus
+/opt/prometheus/prometheus --config.file=/opt/prometheus/prometheus.yml
```
Now attempt to go to <http://localhost:9090/>. You should be presented with the Prometheus homepage. This is a good
@@ -216,7 +222,7 @@ the `chart` dimension. If you'd like you can combine the `chart` and `instance`
Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
This is the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
-page](/exporting/prometheus/#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
+page](/exporting/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter, like so:
<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected>. If you choose to use
this method you will need to use Prometheus's set of functions (<https://prometheus.io/docs/querying/functions/>), such as `rate()`, to
diff --git a/exporting/prometheus/remote_write/remote_write_request.cc b/exporting/prometheus/remote_write/remote_write_request.cc
index 48d19efd6..cfd61271e 100644
--- a/exporting/prometheus/remote_write/remote_write_request.cc
+++ b/exporting/prometheus/remote_write/remote_write_request.cc
@@ -178,6 +178,46 @@ int pack_and_clear_write_request(void *write_request_p, char *buffer, size_t *si
}
/**
+ * Writes an unpacked write request into a text buffer
+ *
+ * @param compressed_write_request the Snappy-compressed write request
+ * @param compressed_size the size of the compressed write request
+ * @param buffer a buffer, where text is written
+ * @param size the size of the buffer
+ * @return Returns 0 on success, 1 on failure
+ */
+int convert_write_request_to_string(
+ const char *compressed_write_request,
+ size_t compressed_size,
+ char *buffer,
+ size_t size)
+{
+ size_t uncompressed_size = 0;
+
+ snappy::GetUncompressedLength(compressed_write_request, compressed_size, &uncompressed_size);
+ if (size < uncompressed_size)
+ return 1;
+ char *uncompressed_write_request = (char *)malloc(size);
+
+ if (snappy::RawUncompress(compressed_write_request, compressed_size, uncompressed_write_request) == false) {
+ free(uncompressed_write_request);
+ return 1;
+ }
+
+ WriteRequest *write_request = google::protobuf::Arena::CreateMessage<WriteRequest>(&arena);
+ if (write_request->ParseFromString(std::string(uncompressed_write_request, uncompressed_size)) == false) {
+ free(uncompressed_write_request);
+ return 1;
+ }
+
+ std::string text_write_request(write_request->DebugString());
+ text_write_request.copy(buffer, size);
+
+ free(uncompressed_write_request);
+
+ return 0;
+}
+
+/**
* Shuts down the Protobuf library
*/
void protocol_buffers_shutdown()
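
For readers unfamiliar with the snappy API used above, here is a minimal, self-contained round-trip sketch (not part of the patch) of the same `GetUncompressedLength`/`RawUncompress` pattern that `convert_write_request_to_string()` relies on:

```cpp
#include <snappy.h>

#include <cstdio>
#include <cstdlib>
#include <string>

int main() {
    // Stand-in for a serialized WriteRequest; any byte string works here.
    std::string raw = "example payload";
    std::string compressed;
    snappy::Compress(raw.data(), raw.size(), &compressed);

    // Same sequence as convert_write_request_to_string(): query the
    // uncompressed size first, then decompress into a caller-sized buffer.
    size_t uncompressed_size = 0;
    if (!snappy::GetUncompressedLength(compressed.data(), compressed.size(), &uncompressed_size))
        return 1;

    char *buffer = (char *)malloc(uncompressed_size);
    if (!snappy::RawUncompress(compressed.data(), compressed.size(), buffer)) {
        free(buffer);
        return 1;
    }

    printf("%.*s\n", (int)uncompressed_size, buffer);
    free(buffer);
    return 0;
}
```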
diff --git a/exporting/prometheus/remote_write/remote_write_request.h b/exporting/prometheus/remote_write/remote_write_request.h
index e1dfacaf8..5f242b941 100644
--- a/exporting/prometheus/remote_write/remote_write_request.h
+++ b/exporting/prometheus/remote_write/remote_write_request.h
@@ -24,6 +24,12 @@ size_t get_write_request_size(void *write_request_p);
int pack_and_clear_write_request(void *write_request_p, char *buffer, size_t *size);
+int convert_write_request_to_string(
+ const char *compressed_write_request,
+ size_t compressed_size,
+ char *buffer,
+ size_t size);
+
void protocol_buffers_shutdown();
#ifdef __cplusplus
diff --git a/exporting/pubsub/README.md b/exporting/pubsub/README.md
index 6da14c44f..73b6a2031 100644
--- a/exporting/pubsub/README.md
+++ b/exporting/pubsub/README.md
@@ -10,17 +10,10 @@ sidebar_label: Google Cloud Pub/Sub Service
## Prerequisites
To use the Pub/Sub service for metric collecting and processing, you should first
-[install](https://github.com/googleapis/cpp-cmakefiles) Google Cloud Platform C++ Proto Libraries.
+[install](https://github.com/googleapis/google-cloud-cpp/) Google Cloud Platform C++ Client Libraries.
Pub/Sub support also depends on those libraries' dependencies, such as `protobuf`, `protoc`, and `grpc`. Next,
Netdata should be reinstalled from source. The installer will detect that the required libraries are now available.
-> You [cannot compile Netdata](https://github.com/netdata/netdata/issues/10193) with Pub/Sub support enabled using
-> `grpc` 1.32 or higher.
->
-> Some distributions don't have `.cmake` files in packages. To build the C++ Proto Libraries on such distributions we
-> advise you to delete `protobuf`, `protoc`, and `grpc` related packages and
-> [install](https://github.com/grpc/grpc/blob/master/BUILDING.md) `grpc` with its dependencies from source.
-
## Configuration
To enable data sending to the Pub/Sub service, run `./edit-config exporting.conf` in the Netdata configuration directory
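
For illustration, a Pub/Sub section in `exporting.conf` looks roughly like the sketch below; the instance name, project, and topic are placeholders, so check the options your Netdata version actually supports:

```conf
[pubsub:my_instance]
    enabled = yes
    destination = pubsub.googleapis.com
    credentials file = /etc/netdata/google_cloud_credentials.json
    project id = my_project
    topic id = my_topic
```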
diff --git a/exporting/pubsub/pubsub_publish.cc b/exporting/pubsub/pubsub_publish.cc
index 6122dddba..cc14154f8 100644
--- a/exporting/pubsub/pubsub_publish.cc
+++ b/exporting/pubsub/pubsub_publish.cc
@@ -194,7 +194,7 @@ int pubsub_get_result(
{
struct pubsub_specific_data *connector_specific_data = (struct pubsub_specific_data *)pubsub_specific_data_p;
std::list<struct response> *responses = (std::list<struct response> *)connector_specific_data->responses;
- grpc_impl::CompletionQueue::NextStatus next_status;
+ grpc::CompletionQueue::NextStatus next_status;
*sent_metrics = 0;
*sent_bytes = 0;
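
The one-line change above tracks gRPC's removal of the transitional `grpc_impl` namespace in newer releases; the type is used the same way as before. A minimal polling sketch using only the stock gRPC API (none of this is Netdata code):

```cpp
#include <grpcpp/grpcpp.h>

#include <chrono>

// Drain ready events from a completion queue without blocking forever.
// grpc::CompletionQueue::NextStatus is the type renamed in this patch.
void drain_ready_events(grpc::CompletionQueue &cq) {
    void *tag = nullptr;
    bool ok = false;

    grpc::CompletionQueue::NextStatus next_status;
    do {
        auto deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(50);
        next_status = cq.AsyncNext(&tag, &ok, deadline);
        // GOT_EVENT: one operation completed (inspect tag/ok);
        // TIMEOUT: nothing was ready before the deadline;
        // SHUTDOWN: the queue has been shut down and drained.
    } while (next_status == grpc::CompletionQueue::GOT_EVENT);
}
```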
diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c
index 7188c6eee..fb08ff43b 100644
--- a/exporting/tests/test_exporting_engine.c
+++ b/exporting/tests/test_exporting_engine.c
@@ -1309,27 +1309,65 @@ static void test_format_batch_prometheus_remote_write(void **state)
assert_int_equal(format_batch_prometheus_remote_write(instance), 0);
BUFFER *buffer = instance->buffer;
- assert_int_equal(buffer_strlen(buffer), 192);
-
- BUFFER *escaped_buffer = buffer_create(850);
- size_t len = buffer_strlen(buffer);
- char *ch = (char *)buffer_tostring(buffer);
- for (; len > 0; ch++, len--)
- buffer_sprintf(escaped_buffer, "\\%03o", (unsigned int)*ch);
+ char *write_request_string = calloc(1, 1000);
+ convert_write_request_to_string(buffer_tostring(buffer), buffer_strlen(buffer), write_request_string, 999);
+ assert_int_equal(strlen(write_request_string), 753);
assert_string_equal(
- buffer_tostring(escaped_buffer),
- "\\37777777641\\002\\120\\012\\37777777622\\001\\012\\025\\012\\010\\137\\137\\156\\141\\155\\145\\137\\137"
- "\\022\\011\\164\\145\\163\\164\\005\\015\\064\\012\\031\\012\\010\\151\\156\\163\\164\\141\\156\\143\\145\\022"
- "\\015\\005\\027\\021\\017\\100\\012\\037\\012\\013\\141\\160\\160\\154\\151\\143\\141\\164\\151\\157\\156\\022"
- "\\020\\005\\036\\035\\022\\034\\012\\027\\012\\007\\166\\145\\162\\163\\001\\035\\000\\014\\005\\035\\015\\016"
- "\\014\\012\\026\\012\\010\\005\\020\\020\\153\\145\\171\\022\\012\\005\\012\\040\\166\\141\\154\\165\\145\\022"
- "\\014\\011\\000\\005\\001\\030\\37777777760\\077\\020\\37777777713\\165\\012\\37777777611\\142\\37777777625"
- "\\000\\034\\023\\012\\005\\143\\150\\141\\162\\164\\011\\075\\000\\040\\005\\014\\054\\012\\025\\012\\006\\146"
- "\\141\\155\\151\\154\\171\\022\\013\\005\\123\\011\\015\\040\\012\\033\\012\\011\\144\\151\\155\\145\\156\\005"
- "\\37777777607\\000\\016\\005\\032\\025\\020\\000\\012\\146\\37777777736\\000\\064\\022\\014\\011\\000\\000\\000"
- "\\004\\130\\123\\37777777635\\101\\020\\37777777714\\165");
-
- buffer_free(escaped_buffer);
+ write_request_string,
+ "timeseries {\n"
+ " labels {\n"
+ " name: \"__name__\"\n"
+ " value: \"test_name\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"instance\"\n"
+ " value: \"test_instance\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"application\"\n"
+ " value: \"test_application\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"version\"\n"
+ " value: \"test_version\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"test_key\"\n"
+ " value: \"test_value\"\n"
+ " }\n"
+ " samples {\n"
+ " value: 1\n"
+ " timestamp: 15051\n"
+ " }\n"
+ "}\n"
+ "timeseries {\n"
+ " labels {\n"
+ " name: \"__name__\"\n"
+ " value: \"test_name\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"chart\"\n"
+ " value: \"test chart\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"family\"\n"
+ " value: \"test_family\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"dimension\"\n"
+ " value: \"test_dimension\"\n"
+ " }\n"
+ " labels {\n"
+ " name: \"instance\"\n"
+ " value: \"test_instance\"\n"
+ " }\n"
+ " samples {\n"
+ " value: 123000321\n"
+ " timestamp: 15052\n"
+ " }\n"
+ "}\n");
+ free(write_request_string);
+
protocol_buffers_shutdown();
}
#endif // ENABLE_PROMETHEUS_REMOTE_WRITE
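
The rewritten assertion above compares human-readable protobuf text instead of octal-escaped bytes. That text comes from protobuf's `DebugString()`; a minimal sketch of how such output is produced, assuming generated bindings for the Prometheus remote-write proto (here called `prometheus::WriteRequest`):

```cpp
#include <iostream>

#include "remote_write.pb.h"  // generated from the Prometheus remote-write proto

int main() {
    prometheus::WriteRequest request;

    auto *series = request.add_timeseries();
    auto *label = series->add_labels();
    label->set_name("__name__");
    label->set_value("test_name");

    auto *sample = series->add_samples();
    sample->set_value(1);
    sample->set_timestamp(15051);

    // Prints the same "timeseries { labels { ... } }" text format that the
    // test above asserts against.
    std::cout << request.DebugString();
    return 0;
}
```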