Diffstat (limited to 'exporting/aws_kinesis')
-rw-r--r--  exporting/aws_kinesis/Makefile.am                  |   8
l---------  exporting/aws_kinesis/README.md                    |   1
-rw-r--r--  exporting/aws_kinesis/aws_kinesis.c                | 219
-rw-r--r--  exporting/aws_kinesis/aws_kinesis.h                |  16
-rw-r--r--  exporting/aws_kinesis/aws_kinesis_put_record.cc    | 151
-rw-r--r--  exporting/aws_kinesis/aws_kinesis_put_record.h     |  35
-rw-r--r--  exporting/aws_kinesis/integrations/aws_kinesis.md  | 168
-rw-r--r--  exporting/aws_kinesis/metadata.yaml                | 173
8 files changed, 0 insertions, 771 deletions
diff --git a/exporting/aws_kinesis/Makefile.am b/exporting/aws_kinesis/Makefile.am
deleted file mode 100644
index 161784b8..00000000
--- a/exporting/aws_kinesis/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
-    README.md \
-    $(NULL)
diff --git a/exporting/aws_kinesis/README.md b/exporting/aws_kinesis/README.md
deleted file mode 120000
index dbc98ac1..00000000
--- a/exporting/aws_kinesis/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/aws_kinesis.md
\ No newline at end of file
diff --git a/exporting/aws_kinesis/aws_kinesis.c b/exporting/aws_kinesis/aws_kinesis.c
deleted file mode 100644
index 498d9ee2..00000000
--- a/exporting/aws_kinesis/aws_kinesis.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "aws_kinesis.h"
-
-/**
- * Clean AWS Kinesis *
- */
-void aws_kinesis_cleanup(struct instance *instance)
-{
-    netdata_log_info("EXPORTING: cleaning up instance %s ...", instance->config.name);
-    kinesis_shutdown(instance->connector_specific_data);
-
-    freez(instance->connector_specific_data);
-
-    struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
-    if (connector_specific_config) {
-        freez(connector_specific_config->auth_key_id);
-        freez(connector_specific_config->secure_key);
-        freez(connector_specific_config->stream_name);
-
-        freez(connector_specific_config);
-    }
-
-    netdata_log_info("EXPORTING: instance %s exited", instance->config.name);
-    instance->exited = 1;
-}
-
-/**
- * Initialize AWS Kinesis connector instance
- *
- * @param instance an instance data structure.
- * @return Returns 0 on success, 1 on failure.
- */
-int init_aws_kinesis_instance(struct instance *instance)
-{
-    instance->worker = aws_kinesis_connector_worker;
-
-    instance->start_batch_formatting = NULL;
-    instance->start_host_formatting = format_host_labels_json_plaintext;
-    instance->start_chart_formatting = NULL;
-
-    if (EXPORTING_OPTIONS_DATA_SOURCE(instance->config.options) == EXPORTING_SOURCE_DATA_AS_COLLECTED)
-        instance->metric_formatting = format_dimension_collected_json_plaintext;
-    else
-        instance->metric_formatting = format_dimension_stored_json_plaintext;
-
-    instance->end_chart_formatting = NULL;
-    instance->variables_formatting = NULL;
-    instance->end_host_formatting = flush_host_labels;
-    instance->end_batch_formatting = NULL;
-
-    instance->prepare_header = NULL;
-    instance->check_response = NULL;
-
-    instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
-    if (!instance->buffer) {
-        netdata_log_error("EXPORTING: cannot create buffer for AWS Kinesis exporting connector instance %s",
-                          instance->config.name);
-        return 1;
-    }
-    if (uv_mutex_init(&instance->mutex))
-        return 1;
-    if (uv_cond_init(&instance->cond_var))
-        return 1;
-
-    if (!instance->engine->aws_sdk_initialized) {
-        aws_sdk_init();
-        instance->engine->aws_sdk_initialized = 1;
-    }
-
-    struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
-    struct aws_kinesis_specific_data *connector_specific_data = callocz(1, sizeof(struct aws_kinesis_specific_data));
-    instance->connector_specific_data = (void *)connector_specific_data;
-
-    if (!strcmp(connector_specific_config->stream_name, "")) {
-        netdata_log_error("stream name is a mandatory Kinesis parameter but it is not configured");
-        return 1;
-    }
-
-    kinesis_init(
-        (void *)connector_specific_data,
-        instance->config.destination,
-        connector_specific_config->auth_key_id,
-        connector_specific_config->secure_key,
-        instance->config.timeoutms);
-
-    return 0;
-}
-
-/**
- * AWS Kinesis connector worker
- *
- * Runs in a separate thread for every instance.
- *
- * @param instance_p an instance data structure.
- */
-void aws_kinesis_connector_worker(void *instance_p)
-{
-    struct instance *instance = (struct instance *)instance_p;
-    struct aws_kinesis_specific_config *connector_specific_config = instance->config.connector_specific_config;
-    struct aws_kinesis_specific_data *connector_specific_data = instance->connector_specific_data;
-
-    while (!instance->engine->exit) {
-        unsigned long long partition_key_seq = 0;
-        struct stats *stats = &instance->stats;
-
-        uv_mutex_lock(&instance->mutex);
-        while (!instance->data_is_ready)
-            uv_cond_wait(&instance->cond_var, &instance->mutex);
-        instance->data_is_ready = 0;
-
-        if (unlikely(instance->engine->exit)) {
-            uv_mutex_unlock(&instance->mutex);
-            break;
-        }
-
-        // reset the monitoring chart counters
-        stats->received_bytes =
-        stats->sent_bytes =
-        stats->sent_metrics =
-        stats->lost_metrics =
-        stats->receptions =
-        stats->transmission_successes =
-        stats->transmission_failures =
-        stats->data_lost_events =
-        stats->lost_bytes =
-        stats->reconnects = 0;
-
-        BUFFER *buffer = (BUFFER *)instance->buffer;
-        size_t buffer_len = buffer_strlen(buffer);
-
-        stats->buffered_bytes = buffer_len;
-
-        size_t sent = 0;
-
-        while (sent < buffer_len) {
-            char partition_key[KINESIS_PARTITION_KEY_MAX + 1];
-            snprintf(partition_key, KINESIS_PARTITION_KEY_MAX, "netdata_%llu", partition_key_seq++);
-            size_t partition_key_len = strnlen(partition_key, KINESIS_PARTITION_KEY_MAX);
-
-            const char *first_char = buffer_tostring(buffer) + sent;
-
-            size_t record_len = 0;
-
-            // split buffer into chunks of maximum allowed size
-            if (buffer_len - sent < KINESIS_RECORD_MAX - partition_key_len) {
-                record_len = buffer_len - sent;
-            } else {
-                record_len = KINESIS_RECORD_MAX - partition_key_len;
-                while (record_len && *(first_char + record_len - 1) != '\n')
-                    record_len--;
-            }
-            char error_message[ERROR_LINE_MAX + 1] = "";
-
-            netdata_log_debug(D_EXPORTING,
-                "EXPORTING: kinesis_put_record(): dest = %s, id = %s, key = %s, stream = %s, partition_key = %s, "
-                "buffer = %zu, record = %zu",
-                instance->config.destination,
-                connector_specific_config->auth_key_id,
-                connector_specific_config->secure_key,
-                connector_specific_config->stream_name,
-                partition_key,
-                buffer_len,
-                record_len);
-
-            kinesis_put_record(
-                connector_specific_data, connector_specific_config->stream_name, partition_key, first_char, record_len);
-
-            sent += record_len;
-            stats->transmission_successes++;
-
-            size_t sent_bytes = 0, lost_bytes = 0;
-
-            if (unlikely(kinesis_get_result(
-                    connector_specific_data->request_outcomes, error_message, &sent_bytes, &lost_bytes))) {
-                // oops! we couldn't send (all or some of the) data
-                netdata_log_error("EXPORTING: %s", error_message);
-                netdata_log_error("EXPORTING: failed to write data to external database '%s'. Willing to write %zu bytes, wrote %zu bytes.",
-                    instance->config.destination,
-                    sent_bytes,
-                    sent_bytes - lost_bytes);
-
-                stats->transmission_failures++;
-                stats->data_lost_events++;
-                stats->lost_bytes += lost_bytes;
-
-                // estimate the number of lost metrics
-                stats->lost_metrics += (collected_number)(
-                    stats->buffered_metrics *
-                    (buffer_len && (lost_bytes > buffer_len) ? (double)lost_bytes / buffer_len : 1));
-
-                break;
-            } else {
-                stats->receptions++;
-            }
-
-            if (unlikely(instance->engine->exit))
-                break;
-        }
-
-        stats->sent_bytes += sent;
-        if (likely(sent == buffer_len))
-            stats->sent_metrics = stats->buffered_metrics;
-
-        buffer_flush(buffer);
-
-        send_internal_metrics(instance);
-
-        stats->buffered_metrics = 0;
-
-        uv_mutex_unlock(&instance->mutex);
-
-#ifdef UNIT_TESTING
-        return;
-#endif
-    }
-
-    aws_kinesis_cleanup(instance);
-}
diff --git a/exporting/aws_kinesis/aws_kinesis.h b/exporting/aws_kinesis/aws_kinesis.h
deleted file mode 100644
index d88a4586..00000000
--- a/exporting/aws_kinesis/aws_kinesis.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EXPORTING_KINESIS_H
-#define NETDATA_EXPORTING_KINESIS_H
-
-#include "exporting/exporting_engine.h"
-#include "exporting/json/json.h"
-#include "aws_kinesis_put_record.h"
-
-#define KINESIS_PARTITION_KEY_MAX 256
-#define KINESIS_RECORD_MAX 1024 * 1024
-
-int init_aws_kinesis_instance(struct instance *instance);
-void aws_kinesis_connector_worker(void *instance_p);
-
-#endif //NETDATA_EXPORTING_KINESIS_H
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.cc b/exporting/aws_kinesis/aws_kinesis_put_record.cc
deleted file mode 100644
index 62c6b030..00000000
--- a/exporting/aws_kinesis/aws_kinesis_put_record.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <aws/core/Aws.h>
-#include <aws/core/client/ClientConfiguration.h>
-#include <aws/core/auth/AWSCredentials.h>
-#include <aws/core/utils/Outcome.h>
-#include <aws/kinesis/KinesisClient.h>
-#include <aws/kinesis/model/PutRecordRequest.h>
-#include "aws_kinesis_put_record.h"
-
-using namespace Aws;
-
-static SDKOptions options;
-
-struct request_outcome {
-    Kinesis::Model::PutRecordOutcomeCallable future_outcome;
-    size_t data_len;
-};
-
-/**
- * Initialize AWS SDK API
- */
-void aws_sdk_init()
-{
-    InitAPI(options);
-}
-
-/**
- * Shutdown AWS SDK API
- */
-void aws_sdk_shutdown()
-{
-    ShutdownAPI(options);
-}
-
-/**
- * Initialize a client and a data structure for request outcomes
- *
- * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
- * @param region AWS region.
- * @param access_key_id AWS account access key ID.
- * @param secret_key AWS account secret access key.
- * @param timeout communication timeout.
- */
-void kinesis_init(
-    void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
-    const long timeout)
-{
-    struct aws_kinesis_specific_data *kinesis_specific_data =
-        (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
-
-    Client::ClientConfiguration config;
-
-    config.region = region;
-    config.requestTimeoutMs = timeout;
-    config.connectTimeoutMs = timeout;
-
-    Kinesis::KinesisClient *client;
-
-    if (access_key_id && *access_key_id && secret_key && *secret_key) {
-        client = New<Kinesis::KinesisClient>("client", Auth::AWSCredentials(access_key_id, secret_key), config);
-    } else {
-        client = New<Kinesis::KinesisClient>("client", config);
-    }
-    kinesis_specific_data->client = (void *)client;
-
-    Vector<request_outcome> *request_outcomes;
-
-    request_outcomes = new Vector<request_outcome>;
-    kinesis_specific_data->request_outcomes = (void *)request_outcomes;
-}
-
-/**
- * Deallocate Kinesis specific data
- *
- * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
- */
-void kinesis_shutdown(void *kinesis_specific_data_p)
-{
-    struct aws_kinesis_specific_data *kinesis_specific_data =
-        (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
-
-    Delete((Kinesis::KinesisClient *)kinesis_specific_data->client);
-    delete (Vector<request_outcome> *)kinesis_specific_data->request_outcomes;
-}
-
-/**
- * Send data to the Kinesis service
- *
- * @param kinesis_specific_data_p a pointer to a structure with client and request outcome information.
- * @param stream_name the name of a stream to send to.
- * @param partition_key a partition key which automatically maps data to a specific stream.
- * @param data a data buffer to send to the stream.
- * @param data_len the length of the data buffer.
- */
-void kinesis_put_record(
-    void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
-    size_t data_len)
-{
-    struct aws_kinesis_specific_data *kinesis_specific_data =
-        (struct aws_kinesis_specific_data *)kinesis_specific_data_p;
-    Kinesis::Model::PutRecordRequest request;
-
-    request.SetStreamName(stream_name);
-    request.SetPartitionKey(partition_key);
-    request.SetData(Utils::ByteBuffer((unsigned char *)data, data_len));
-
-    ((Vector<request_outcome> *)(kinesis_specific_data->request_outcomes))->push_back(
-        { ((Kinesis::KinesisClient *)(kinesis_specific_data->client))->PutRecordCallable(request), data_len });
-}
-
-/**
- * Get results from service responses
- *
- * @param request_outcomes_p request outcome information.
- * @param error_message report error message to a caller.
- * @param sent_bytes report to a caller how many bytes was successfully sent.
- * @param lost_bytes report to a caller how many bytes was lost during transmission.
- * @return Returns 0 if all data was sent successfully, 1 when data was lost on transmission
- */
-int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes)
-{
-    Vector<request_outcome> *request_outcomes = (Vector<request_outcome> *)request_outcomes_p;
-    Kinesis::Model::PutRecordOutcome outcome;
-    *sent_bytes = 0;
-    *lost_bytes = 0;
-
-    for (auto request_outcome = request_outcomes->begin(); request_outcome != request_outcomes->end();) {
-        std::future_status status = request_outcome->future_outcome.wait_for(std::chrono::microseconds(100));
-
-        if (status == std::future_status::ready || status == std::future_status::deferred) {
-            outcome = request_outcome->future_outcome.get();
-            *sent_bytes += request_outcome->data_len;
-
-            if (!outcome.IsSuccess()) {
-                *lost_bytes += request_outcome->data_len;
-                outcome.GetError().GetMessage().copy(error_message, ERROR_LINE_MAX);
-            }
-
-            request_outcomes->erase(request_outcome);
-        } else {
-            ++request_outcome;
-        }
-    }
-
-    if (*lost_bytes) {
-        return 1;
-    }
-
-    return 0;
-}
diff --git a/exporting/aws_kinesis/aws_kinesis_put_record.h b/exporting/aws_kinesis/aws_kinesis_put_record.h
deleted file mode 100644
index 321baf66..00000000
--- a/exporting/aws_kinesis/aws_kinesis_put_record.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
-#define NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
-
-#define ERROR_LINE_MAX 1023
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct aws_kinesis_specific_data {
-    void *client;
-    void *request_outcomes;
-};
-
-void aws_sdk_init();
-void aws_sdk_shutdown();
-
-void kinesis_init(
-    void *kinesis_specific_data_p, const char *region, const char *access_key_id, const char *secret_key,
-    const long timeout);
-void kinesis_shutdown(void *client);
-
-void kinesis_put_record(
-    void *kinesis_specific_data_p, const char *stream_name, const char *partition_key, const char *data,
-    size_t data_len);
-
-int kinesis_get_result(void *request_outcomes_p, char *error_message, size_t *sent_bytes, size_t *lost_bytes);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif //NETDATA_EXPORTING_KINESIS_PUT_RECORD_H
diff --git a/exporting/aws_kinesis/integrations/aws_kinesis.md b/exporting/aws_kinesis/integrations/aws_kinesis.md
deleted file mode 100644
index deff55be..00000000
--- a/exporting/aws_kinesis/integrations/aws_kinesis.md
+++ /dev/null
@@ -1,168 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/exporting/aws_kinesis/metadata.yaml"
-sidebar_label: "AWS Kinesis"
-learn_status: "Published"
-learn_rel_path: "Exporting"
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE EXPORTER'S metadata.yaml FILE"
-endmeta-->
-
-# AWS Kinesis
-
-
-<img src="https://netdata.cloud/img/aws-kinesis.svg" width="150"/>
-
-
-Export metrics to AWS Kinesis Data Streams
-
-
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Setup
-
-### Prerequisites
-
-####
-
-- First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++
-- Here are the instructions when building from source, to ensure 3rd party dependencies are installed:
-  ```bash
-  git clone --recursive https://github.com/aws/aws-sdk-cpp.git
-  cd aws-sdk-cpp/
-  git submodule update --init --recursive
-  mkdir BUILT
-  cd BUILT
-  cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..
-  make
-  make install
-  ```
-- `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.
-- Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `exporting.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config exporting.conf
-```
-#### Options
-
-Netdata automatically computes a partition key for every record to distribute records evenly across the available shards.
-The following options can be defined for this exporter.
-
-
-<details><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| enabled | Enables or disables an exporting connector instance (yes/no). | no | yes |
-| destination | Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics. | no | yes |
-| username | Username for HTTP authentication | my_username | no |
-| password | Password for HTTP authentication | my_password | no |
-| data source | Selects the kind of data that will be sent to the external database. (as collected/average/sum) | | no |
-| hostname | The hostname to be used for sending data to the external database server. | [global].hostname | no |
-| prefix | The prefix to add to all metrics. | Netdata | no |
-| update every | Frequency of sending data to the external database, in seconds. | 10 | no |
-| buffer on failures | The number of iterations (`update every` seconds) to buffer data, when the external database server is not available. | 10 | no |
-| timeout ms | The timeout in milliseconds to wait for the external database server to process the data. | 2 * update_every * 1000 | no |
-| send hosts matching | Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns). | localhost * | no |
-| send charts matching | One or more space separated patterns (use * as wildcard) checked against both chart id and chart name. | * | no |
-| send names instead of ids | Controls the metric names Netdata should send to the external database (yes/no). | | no |
-| send configured labels | Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes/no). | | no |
-| send automatic labels | Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes/no). | | no |
-
-##### destination
-
-The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-- PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-- IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.
-- PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
-
-Example IPv4:
-  ```yaml
-  destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
-  ```
-Example IPv6 and IPv4 together:
-```yaml
-destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-When multiple servers are defined, Netdata will try the next one when the previous one fails.
-
-
-##### update every
-
-Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-
-
-##### buffer on failures
-
-If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-
-
-##### send hosts matching
-
-Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-
-
-##### send charts matching
-
-A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-has a higher priority than the configuration option.
-
-##### send names instead of ids
-
-Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
-
-</details>
-
-#### Examples
-
-##### Example configuration
-
-Basic configuration
-
-```yaml
-[kinesis:my_instance]
-    enabled = yes
-    destination = us-east-1
-
-```
-##### Configuration with AWS credentials
-
-Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.
-
-```yaml
-[kinesis:my_instance]
-    enabled = yes
-    destination = us-east-1
-    # AWS credentials
-    aws_access_key_id = your_access_key_id
-    aws_secret_access_key = your_secret_access_key
-    # destination stream
-    stream name = your_stream_name
-
-```
-
diff --git a/exporting/aws_kinesis/metadata.yaml b/exporting/aws_kinesis/metadata.yaml
deleted file mode 100644
index 96e4044e..00000000
--- a/exporting/aws_kinesis/metadata.yaml
+++ /dev/null
@@ -1,173 +0,0 @@
-# yamllint disable rule:line-length
----
-id: 'export-aws-kinesis'
-meta:
-  name: 'AWS Kinesis'
-  link: 'https://aws.amazon.com/kinesis/'
-  categories:
-    - export
-  icon_filename: 'aws-kinesis.svg'
-keywords:
-  - exporter
-  - AWS
-  - Kinesis
-overview:
-  exporter_description: |
-    Export metrics to AWS Kinesis Data Streams
-  exporter_limitations: ''
-setup:
-  prerequisites:
-    list:
-      - title: ''
-        description: |
-          - First [install](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) AWS SDK for C++
-          - Here are the instructions when building from source, to ensure 3rd party dependencies are installed:
-            ```bash
-            git clone --recursive https://github.com/aws/aws-sdk-cpp.git
-            cd aws-sdk-cpp/
-            git submodule update --init --recursive
-            mkdir BUILT
-            cd BUILT
-            cmake -DCMAKE_INSTALL_PREFIX=/usr -DBUILD_ONLY=kinesis ..
-            make
-            make install
-            ```
-          - `libcrypto`, `libssl`, and `libcurl` are also required to compile Netdata with Kinesis support enabled.
-          - Next, Netdata should be re-installed from the source. The installer will detect that the required libraries are now available.
-  configuration:
-    file:
-      name: 'exporting.conf'
-    options:
-      description: |
-        Netdata automatically computes a partition key for every record to distribute records evenly across the available shards.
-        The following options can be defined for this exporter.
-      folding:
-        title: 'Config options'
-        enabled: true
-      list:
-        - name: 'enabled'
-          default_value: 'no'
-          description: 'Enables or disables an exporting connector instance (yes|no).'
-          required: true
-        - name: 'destination'
-          default_value: 'no'
-          description: 'Accepts a space separated list of hostnames, IPs (IPv4 and IPv6) and ports to connect to. Netdata will use the first available to send the metrics.'
-          required: true
-          detailed_description: |
-            The format of each item in this list, is: [PROTOCOL:]IP[:PORT].
-            - PROTOCOL can be udp or tcp. tcp is the default and only supported by the current exporting engine.
-            - IP can be XX.XX.XX.XX (IPv4), or [XX:XX...XX:XX] (IPv6). For IPv6 you can enclose the IP in [] to separate it from the port.
-            - PORT can be a number or a service name. If omitted, the default port for the exporting connector will be used.
-
-            Example IPv4:
-            ```yaml
-            destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
-            ```
-            Example IPv6 and IPv4 together:
-            ```yaml
-            destination = [ffff:...:0001]:2003 10.11.12.1:2003
-            ```
-            When multiple servers are defined, Netdata will try the next one when the previous one fails.
-        - name: 'username'
-          default_value: 'my_username'
-          description: 'Username for HTTP authentication'
-          required: false
-        - name: 'password'
-          default_value: 'my_password'
-          description: 'Password for HTTP authentication'
-          required: false
-        - name: 'data source'
-          default_value: ''
-          description: 'Selects the kind of data that will be sent to the external database. (as collected|average|sum)'
-          required: false
-        - name: 'hostname'
-          default_value: '[global].hostname'
-          description: 'The hostname to be used for sending data to the external database server.'
-          required: false
-        - name: 'prefix'
-          default_value: 'Netdata'
-          description: 'The prefix to add to all metrics.'
-          required: false
-        - name: 'update every'
-          default_value: '10'
-          description: |
-            Frequency of sending data to the external database, in seconds.
-          required: false
-          detailed_description: |
-            Netdata will add some randomness to this number, to prevent stressing the external server when many Netdata servers
-            send data to the same database. This randomness does not affect the quality of the data, only the time they are sent.
-        - name: 'buffer on failures'
-          default_value: '10'
-          description: |
-            The number of iterations (`update every` seconds) to buffer data, when the external database server is not available.
-          required: false
-          detailed_description: |
-            If the server fails to receive the data after that many failures, data loss on the connector instance is expected (Netdata will also log it).
-        - name: 'timeout ms'
-          default_value: '2 * update_every * 1000'
-          description: 'The timeout in milliseconds to wait for the external database server to process the data.'
-          required: false
-        - name: 'send hosts matching'
-          default_value: 'localhost *'
-          description: |
-            Hosts filter. Determines which hosts will be sent to the external database. The syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/libnetdata/simple_pattern#simple-patterns).
-          required: false
-          detailed_description: |
-            Includes one or more space separated patterns, using * as wildcard (any number of times within each pattern).
-            The patterns are checked against the hostname (the localhost is always checked as localhost), allowing us to
-            filter which hosts will be sent to the external database when this Netdata is a central Netdata aggregating multiple hosts.
-
-            A pattern starting with `!` gives a negative match. So to match all hosts named `*db*` except hosts containing `*child*`,
-            use `!*child* *db*` (so, the order is important: the first pattern matching the hostname will be used - positive or negative).
-        - name: 'send charts matching'
-          default_value: '*'
-          description: |
-            One or more space separated patterns (use * as wildcard) checked against both chart id and chart name.
-          required: false
-          detailed_description: |
-            A pattern starting with ! gives a negative match. So to match all charts named apps.* except charts ending in *reads,
-            use !*reads apps.* (so, the order is important: the first pattern matching the chart id or the chart name will be used,
-            positive or negative). There is also a URL parameter filter that can be used while querying allmetrics. The URL parameter
-            has a higher priority than the configuration option.
-        - name: 'send names instead of ids'
-          default_value: ''
-          description: 'Controls the metric names Netdata should send to the external database (yes|no).'
-          required: false
-          detailed_description: |
-            Netdata supports names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names
-            are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they are
-            different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-        - name: 'send configured labels'
-          default_value: ''
-          description: 'Controls if host labels defined in the `[host labels]` section in `netdata.conf` should be sent to the external database (yes|no).'
-          required: false
-        - name: 'send automatic labels'
-          default_value: ''
-          description: 'Controls if automatically created labels, like `_os_name` or `_architecture` should be sent to the external database (yes|no).'
-          required: false
-  examples:
-    folding:
-      enabled: true
-      title: ''
-    list:
-      - name: 'Example configuration'
-        folding:
-          enabled: false
-        description: 'Basic configuration'
-        config: |
-          [kinesis:my_instance]
-            enabled = yes
-            destination = us-east-1
-      - name: 'Configuration with AWS credentials'
-        folding:
-          enabled: false
-        description: 'Add `:https` modifier to the connector type if you need to use the TLS/SSL protocol. For example: `remote_write:https:my_instance`.'
-        config: |
-          [kinesis:my_instance]
-            enabled = yes
-            destination = us-east-1
-            # AWS credentials
-            aws_access_key_id = your_access_key_id
-            aws_secret_access_key = your_secret_access_key
-            # destination stream
-            stream name = your_stream_name
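
For reference, the `aws_kinesis_put_record.h` interface deleted above exposed a small C-callable wrapper around the AWS SDK's PutRecord call, and the connector worker drove it in an init / put / poll-results / shutdown cycle. The sketch below shows roughly how those entry points chained together, under the assumption that the deleted sources are still compiled in and linked against the AWS SDK for C++; the region, credentials, and stream name are placeholder values, not anything taken from this commit.

```c
// Minimal sketch of the removed put-record API, mirroring the deleted
// aws_kinesis_connector_worker(). Assumes aws_kinesis_put_record.{h,cc}
// are still built; region/credentials/stream below are placeholders.
#include <stdio.h>
#include <string.h>
#include "aws_kinesis_put_record.h"

int main(void)
{
    struct aws_kinesis_specific_data kinesis_data = { 0 };
    char error_message[ERROR_LINE_MAX + 1] = "";
    size_t sent_bytes = 0, lost_bytes = 0;

    aws_sdk_init();

    // Region, credentials and timeout correspond to the values the connector
    // read from exporting.conf (destination, aws_access_key_id, ...).
    kinesis_init(&kinesis_data, "us-east-1", "your_access_key_id",
                 "your_secret_access_key", 2000 /* timeout ms */);

    // One newline-terminated JSON record, paired with a partition key,
    // exactly as the worker split its buffer into KINESIS_RECORD_MAX chunks.
    const char *record = "{\"name\":\"example.metric\",\"value\":1}\n";
    kinesis_put_record(&kinesis_data, "your_stream_name", "netdata_0",
                       record, strlen(record));

    // Poll the queued PutRecord futures; a non-zero return means some bytes
    // were lost. The worker called this once per record and kept running,
    // so a single poll here may simply leave the request pending.
    if (kinesis_get_result(kinesis_data.request_outcomes, error_message,
                           &sent_bytes, &lost_bytes))
        fprintf(stderr, "kinesis error: %s (lost %zu of %zu bytes)\n",
                error_message, lost_bytes, sent_bytes);

    kinesis_shutdown(&kinesis_data);
    aws_sdk_shutdown();
    return 0;
}
```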