summaryrefslogtreecommitdiffstats
path: root/streaming
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 11:19:16 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 12:07:37 +0000
commitb485aab7e71c1625cfc27e0f92c9509f42378458 (patch)
treeae9abe108601079d1679194de237c9a435ae5b55 /streaming
parentAdding upstream version 1.44.3. (diff)
downloadnetdata-b485aab7e71c1625cfc27e0f92c9509f42378458.tar.xz
netdata-b485aab7e71c1625cfc27e0f92c9509f42378458.zip
Adding upstream version 1.45.3+dfsg.upstream/1.45.3+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--src/streaming/common.h (renamed from streaming/common.h)0
-rw-r--r--src/streaming/compression.c (renamed from streaming/compression.c)0
-rw-r--r--src/streaming/compression.h (renamed from streaming/compression.h)2
-rw-r--r--src/streaming/compression_brotli.c (renamed from streaming/compression_brotli.c)0
-rw-r--r--src/streaming/compression_brotli.h (renamed from streaming/compression_brotli.h)0
-rw-r--r--src/streaming/compression_gzip.c (renamed from streaming/compression_gzip.c)0
-rw-r--r--src/streaming/compression_gzip.h (renamed from streaming/compression_gzip.h)0
-rw-r--r--src/streaming/compression_lz4.c (renamed from streaming/compression_lz4.c)0
-rw-r--r--src/streaming/compression_lz4.h (renamed from streaming/compression_lz4.h)0
-rw-r--r--src/streaming/compression_zstd.c (renamed from streaming/compression_zstd.c)0
-rw-r--r--src/streaming/compression_zstd.h (renamed from streaming/compression_zstd.h)0
-rw-r--r--src/streaming/receiver.c (renamed from streaming/receiver.c)94
-rw-r--r--src/streaming/replication.c (renamed from streaming/replication.c)23
-rw-r--r--src/streaming/replication.h (renamed from streaming/replication.h)0
-rw-r--r--src/streaming/rrdpush.c (renamed from streaming/rrdpush.c)151
-rw-r--r--src/streaming/rrdpush.h (renamed from streaming/rrdpush.h)57
-rw-r--r--src/streaming/sender.c (renamed from streaming/sender.c)220
-rw-r--r--src/streaming/stream.conf (renamed from streaming/stream.conf)12
-rw-r--r--streaming/Makefile.am12
-rw-r--r--streaming/README.md580
20 files changed, 226 insertions, 925 deletions
diff --git a/streaming/common.h b/src/streaming/common.h
index b7292f4d0..b7292f4d0 100644
--- a/streaming/common.h
+++ b/src/streaming/common.h
diff --git a/streaming/compression.c b/src/streaming/compression.c
index a94c8a0a6..a94c8a0a6 100644
--- a/streaming/compression.c
+++ b/src/streaming/compression.c
diff --git a/streaming/compression.h b/src/streaming/compression.h
index a67f65b83..285fb2cf6 100644
--- a/streaming/compression.h
+++ b/src/streaming/compression.h
@@ -13,7 +13,7 @@
typedef uint32_t rrdpush_signature_t;
#define RRDPUSH_COMPRESSION_SIGNATURE ((rrdpush_signature_t)('z' | 0x80) | (0x80 << 8) | (0x80 << 16) | ('\n' << 24))
-#define RRDPUSH_COMPRESSION_SIGNATURE_MASK ((rrdpush_signature_t)0xff | (0x80 << 8) | (0x80 << 16) | (0xff << 24))
+#define RRDPUSH_COMPRESSION_SIGNATURE_MASK ((rrdpush_signature_t) 0xffU | (0x80U << 8) | (0x80U << 16) | (0xffU << 24))
#define RRDPUSH_COMPRESSION_SIGNATURE_SIZE sizeof(rrdpush_signature_t)
static inline rrdpush_signature_t rrdpush_compress_encode_signature(size_t compressed_data_size) {
diff --git a/streaming/compression_brotli.c b/src/streaming/compression_brotli.c
index cf52f3bca..cf52f3bca 100644
--- a/streaming/compression_brotli.c
+++ b/src/streaming/compression_brotli.c
diff --git a/streaming/compression_brotli.h b/src/streaming/compression_brotli.h
index 4955e5a82..4955e5a82 100644
--- a/streaming/compression_brotli.h
+++ b/src/streaming/compression_brotli.h
diff --git a/streaming/compression_gzip.c b/src/streaming/compression_gzip.c
index c4ef3af05..c4ef3af05 100644
--- a/streaming/compression_gzip.c
+++ b/src/streaming/compression_gzip.c
diff --git a/streaming/compression_gzip.h b/src/streaming/compression_gzip.h
index 85f34bc6d..85f34bc6d 100644
--- a/streaming/compression_gzip.h
+++ b/src/streaming/compression_gzip.h
diff --git a/streaming/compression_lz4.c b/src/streaming/compression_lz4.c
index f5174134e..f5174134e 100644
--- a/streaming/compression_lz4.c
+++ b/src/streaming/compression_lz4.c
diff --git a/streaming/compression_lz4.h b/src/streaming/compression_lz4.h
index 69f0fadcc..69f0fadcc 100644
--- a/streaming/compression_lz4.h
+++ b/src/streaming/compression_lz4.h
diff --git a/streaming/compression_zstd.c b/src/streaming/compression_zstd.c
index dabc044f7..dabc044f7 100644
--- a/streaming/compression_zstd.c
+++ b/src/streaming/compression_zstd.c
diff --git a/streaming/compression_zstd.h b/src/streaming/compression_zstd.h
index bfabbf89d..bfabbf89d 100644
--- a/streaming/compression_zstd.h
+++ b/src/streaming/compression_zstd.h
diff --git a/streaming/receiver.c b/src/streaming/receiver.c
index a12b94fb4..20f9342df 100644
--- a/streaming/receiver.c
+++ b/src/streaming/receiver.c
@@ -14,7 +14,6 @@ void receiver_state_free(struct receiver_state *rpt) {
freez(rpt->os);
freez(rpt->timezone);
freez(rpt->abbrev_timezone);
- freez(rpt->tags);
freez(rpt->client_ip);
freez(rpt->client_port);
freez(rpt->program_name);
@@ -308,64 +307,59 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i
// this keeps the parser with its current value
// so, parser needs to be allocated before pushing it
- netdata_thread_cleanup_push(pluginsd_process_thread_cleanup, parser);
-
- {
- bool compressed_connection = rrdpush_decompression_initialize(rpt);
-
- buffered_reader_init(&rpt->reader);
+ netdata_thread_cleanup_push(pluginsd_process_thread_cleanup, parser) {
+ bool compressed_connection = rrdpush_decompression_initialize(rpt);
+ buffered_reader_init(&rpt->reader);
#ifdef NETDATA_LOG_STREAM_RECEIVE
- {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "/tmp/stream-receiver-%s.txt", rpt->host ? rrdhost_hostname(
- rpt->host) : "unknown"
- );
- parser->user.stream_log_fp = fopen(filename, "w");
- parser->user.stream_log_repertoire = PARSER_REP_METADATA;
- }
+ {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "/tmp/stream-receiver-%s.txt", rpt->host ? rrdhost_hostname(
+ rpt->host) : "unknown"
+ );
+ parser->user.stream_log_fp = fopen(filename, "w");
+ parser->user.stream_log_repertoire = PARSER_REP_METADATA;
+ }
#endif
- CLEAN_BUFFER *buffer = buffer_create(sizeof(rpt->reader.read_buffer), NULL);
+ CLEAN_BUFFER *buffer = buffer_create(sizeof(rpt->reader.read_buffer), NULL);
- ND_LOG_STACK lgs[] = {
- ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &parser->line),
- ND_LOG_FIELD_CB(NDF_NIDL_NODE, parser_reconstruct_node, parser),
- ND_LOG_FIELD_CB(NDF_NIDL_INSTANCE, parser_reconstruct_instance, parser),
- ND_LOG_FIELD_CB(NDF_NIDL_CONTEXT, parser_reconstruct_context, parser),
- ND_LOG_FIELD_END(),
- };
- ND_LOG_STACK_PUSH(lgs);
-
- while(!receiver_should_stop(rpt)) {
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &parser->line),
+ ND_LOG_FIELD_CB(NDF_NIDL_NODE, parser_reconstruct_node, parser),
+ ND_LOG_FIELD_CB(NDF_NIDL_INSTANCE, parser_reconstruct_instance, parser),
+ ND_LOG_FIELD_CB(NDF_NIDL_CONTEXT, parser_reconstruct_context, parser),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
- if(!buffered_reader_next_line(&rpt->reader, buffer)) {
- STREAM_HANDSHAKE reason = STREAM_HANDSHAKE_DISCONNECT_UNKNOWN_SOCKET_READ_ERROR;
+ while(!receiver_should_stop(rpt)) {
- bool have_new_data = compressed_connection ? receiver_read_compressed(rpt, &reason)
- : receiver_read_uncompressed(rpt, &reason);
+ if(!buffered_reader_next_line(&rpt->reader, buffer)) {
+ STREAM_HANDSHAKE reason = STREAM_HANDSHAKE_DISCONNECT_UNKNOWN_SOCKET_READ_ERROR;
- if(unlikely(!have_new_data)) {
- receiver_set_exit_reason(rpt, reason, false);
- break;
- }
+ bool have_new_data = compressed_connection ? receiver_read_compressed(rpt, &reason)
+ : receiver_read_uncompressed(rpt, &reason);
- continue;
- }
+ if(unlikely(!have_new_data)) {
+ receiver_set_exit_reason(rpt, reason, false);
+ break;
+ }
- if(unlikely(parser_action(parser, buffer->buffer))) {
- receiver_set_exit_reason(rpt, STREAM_HANDSHAKE_DISCONNECT_PARSER_FAILED, false);
- break;
- }
+ continue;
+ }
- buffer->len = 0;
- buffer->buffer[0] = '\0';
- }
- result = parser->user.data_collections_count;
+ if(unlikely(parser_action(parser, buffer->buffer))) {
+ receiver_set_exit_reason(rpt, STREAM_HANDSHAKE_DISCONNECT_PARSER_FAILED, false);
+ break;
}
- // free parser with the pop function
- netdata_thread_cleanup_pop(1);
+ buffer->len = 0;
+ buffer->buffer[0] = '\0';
+ }
+ result = parser->user.data_collections_count;
+ }
+ netdata_thread_cleanup_pop(1); // free parser with the pop function
return result;
}
@@ -556,7 +550,7 @@ static void rrdpush_receive(struct receiver_state *rpt)
rpt->config.mode = default_rrd_memory_mode;
rpt->config.history = default_rrd_history_entries;
- rpt->config.health_enabled = (int)default_health_enabled;
+ rpt->config.health_enabled = health_plugin_enabled();
rpt->config.alarms_delay = 60;
rpt->config.alarms_history = HEALTH_LOG_DEFAULT_HISTORY;
@@ -633,8 +627,6 @@ static void rrdpush_receive(struct receiver_state *rpt)
rrdpush_parse_compression_order(rpt, order);
}
- (void)appconfig_set_default(&stream_config, rpt->machine_guid, "host tags", (rpt->tags)?rpt->tags:"");
-
// find the host for this receiver
{
// this will also update the host with our system_info
@@ -646,7 +638,6 @@ static void rrdpush_receive(struct receiver_state *rpt)
rpt->timezone,
rpt->abbrev_timezone,
rpt->utc_offset,
- rpt->tags,
rpt->program_name,
rpt->program_version,
rpt->config.update_every,
@@ -699,7 +690,7 @@ static void rrdpush_receive(struct receiver_state *rpt)
#ifdef NETDATA_INTERNAL_CHECKS
netdata_log_info("STREAM '%s' [receive from [%s]:%s]: "
"client willing to stream metrics for host '%s' with machine_guid '%s': "
- "update every = %d, history = %d, memory mode = %s, health %s,%s tags '%s'"
+ "update every = %d, history = %d, memory mode = %s, health %s,%s"
, rpt->hostname
, rpt->client_ip
, rpt->client_port
@@ -714,7 +705,6 @@ static void rrdpush_receive(struct receiver_state *rpt)
#else
, ""
#endif
- , rrdhost_tags(rpt->host)
);
#endif // NETDATA_INTERNAL_CHECKS
diff --git a/streaming/replication.c b/src/streaming/replication.c
index bc34361b3..6f68fedae 100644
--- a/streaming/replication.c
+++ b/src/streaming/replication.c
@@ -162,7 +162,7 @@ static struct replication_query *replication_query_prepare(
}
}
- q->backend = st->rrdhost->db[0].eng->backend;
+ q->backend = st->rrdhost->db[0].eng->seb;
// prepare our array of dimensions
size_t count = 0;
@@ -184,7 +184,7 @@ static struct replication_query *replication_query_prepare(
d->rda = dictionary_acquired_item_dup(rd_dfe.dict, rd_dfe.item);
d->rd = rd;
- storage_engine_query_init(q->backend, rd->tiers[0].db_metric_handle, &d->handle, q->query.after, q->query.before,
+ storage_engine_query_init(q->backend, rd->tiers[0].smh, &d->handle, q->query.after, q->query.before,
q->query.locked_data_collection ? STORAGE_PRIORITY_HIGH : STORAGE_PRIORITY_LOW);
d->enabled = true;
d->skip = false;
@@ -1426,7 +1426,7 @@ static void replication_request_delete_callback(const DICTIONARY_ITEM *item __ma
static bool sender_is_still_connected_for_this_request(struct replication_request *rq) {
return rq->sender_last_flush_ut == rrdpush_sender_get_flush_time(rq->sender);
-};
+}
static bool replication_execute_request(struct replication_request *rq, bool workers) {
bool ret = false;
@@ -1838,17 +1838,16 @@ static void replication_worker_cleanup(void *ptr __maybe_unused) {
static void *replication_worker_thread(void *ptr) {
replication_initialize_workers(false);
- netdata_thread_cleanup_push(replication_worker_cleanup, ptr);
-
- while(service_running(SERVICE_REPLICATION)) {
- if(unlikely(replication_pipeline_execute_next() == REQUEST_QUEUE_EMPTY)) {
- sender_thread_buffer_free();
- worker_is_busy(WORKER_JOB_WAIT);
- worker_is_idle();
- sleep_usec(1 * USEC_PER_SEC);
+ netdata_thread_cleanup_push(replication_worker_cleanup, ptr) {
+ while (service_running(SERVICE_REPLICATION)) {
+ if (unlikely(replication_pipeline_execute_next() == REQUEST_QUEUE_EMPTY)) {
+ sender_thread_buffer_free();
+ worker_is_busy(WORKER_JOB_WAIT);
+ worker_is_idle();
+ sleep_usec(1 * USEC_PER_SEC);
+ }
}
}
-
netdata_thread_cleanup_pop(1);
return NULL;
}
diff --git a/streaming/replication.h b/src/streaming/replication.h
index 507b7c32f..507b7c32f 100644
--- a/streaming/replication.h
+++ b/src/streaming/replication.h
diff --git a/streaming/rrdpush.c b/src/streaming/rrdpush.c
index 7c1df2cad..874d4eb2f 100644
--- a/streaming/rrdpush.c
+++ b/src/streaming/rrdpush.c
@@ -208,14 +208,14 @@ int configured_as_parent() {
// chart labels
static int send_clabels_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) {
BUFFER *wb = (BUFFER *)data;
- buffer_sprintf(wb, "CLABEL \"%s\" \"%s\" %d\n", name, value, ls & ~(RRDLABEL_FLAG_INTERNAL));
+ buffer_sprintf(wb, PLUGINSD_KEYWORD_CLABEL " \"%s\" \"%s\" %d\n", name, value, ls & ~(RRDLABEL_FLAG_INTERNAL));
return 1;
}
static void rrdpush_send_clabels(BUFFER *wb, RRDSET *st) {
if (st->rrdlabels) {
if(rrdlabels_walkthrough_read(st->rrdlabels, send_clabels_callback, wb) > 0)
- buffer_sprintf(wb, "CLABEL_COMMIT\n");
+ buffer_sprintf(wb, PLUGINSD_KEYWORD_CLABEL_COMMIT "\n");
}
}
@@ -302,10 +302,10 @@ static inline bool rrdpush_send_chart_definition(BUFFER *wb, RRDSET *st) {
// send the chart functions
if(stream_has_capability(host->sender, STREAM_CAP_FUNCTIONS))
- rrd_functions_expose_rrdpush(st, wb);
+ rrd_chart_functions_expose_rrdpush(st, wb);
// send the chart local custom variables
- rrdsetvar_print_to_streaming_custom_chart_variables(st, wb);
+ rrdvar_print_to_streaming_custom_chart_variables(st, wb);
if (stream_has_capability(host->sender, STREAM_CAP_REPLICATION)) {
time_t db_first_time_t, db_last_time_t;
@@ -380,7 +380,7 @@ static void rrdpush_send_chart_metrics(BUFFER *wb, RRDSET *st, struct sender_sta
rrddim_foreach_done(rd);
if(unlikely(flags & RRDSET_FLAG_UPSTREAM_SEND_VARIABLES))
- rrdsetvar_print_to_streaming_custom_chart_variables(st, wb);
+ rrdvar_print_to_streaming_custom_chart_variables(st, wb);
buffer_fast_strcat(wb, "END\n", 4);
}
@@ -475,7 +475,7 @@ void rrdset_push_metrics_finished(RRDSET_STREAM_BUFFER *rsb, RRDSET *st) {
if(rsb->v2 && rsb->begin_v2_added) {
if(unlikely(rsb->rrdset_flags & RRDSET_FLAG_UPSTREAM_SEND_VARIABLES))
- rrdsetvar_print_to_streaming_custom_chart_variables(st, rsb->wb);
+ rrdvar_print_to_streaming_custom_chart_variables(st, rsb->wb);
buffer_fast_strcat(rsb->wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1);
}
@@ -485,49 +485,6 @@ void rrdset_push_metrics_finished(RRDSET_STREAM_BUFFER *rsb, RRDSET *st) {
*rsb = (RRDSET_STREAM_BUFFER){ .wb = NULL, };
}
-// TODO enable this macro before release
-#define bail_if_no_cap(cap) \
- if(unlikely(!stream_has_capability(host->sender, cap))) { \
- return; \
- }
-
-#define dyncfg_check_can_push(host) \
- if(unlikely(!rrdhost_can_send_definitions_to_parent(host))) \
- return; \
- bail_if_no_cap(STREAM_CAP_DYNCFG)
-
-// assumes job is locked and acquired!!!
-void rrdpush_send_job_status_update(RRDHOST *host, const char *plugin_name, const char *module_name, struct job *job) {
- dyncfg_check_can_push(host);
-
- BUFFER *wb = sender_start(host->sender);
-
- buffer_sprintf(wb, PLUGINSD_KEYWORD_REPORT_JOB_STATUS " %s %s %s %s %d", plugin_name, module_name, job->name, job_status2str(job->status), job->state);
-
- if (job->reason && strlen(job->reason))
- buffer_sprintf(wb, " \"%s\"", job->reason);
-
- buffer_strcat(wb, "\n");
-
- sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_DYNCFG);
-
- sender_thread_buffer_free();
-
- job->dirty = 0;
-}
-
-void rrdpush_send_job_deleted(RRDHOST *host, const char *plugin_name, const char *module_name, const char *job_name) {
- dyncfg_check_can_push(host);
-
- BUFFER *wb = sender_start(host->sender);
-
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DELETE_JOB " %s %s %s\n", plugin_name, module_name, job_name);
-
- sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_DYNCFG);
-
- sender_thread_buffer_free();
-}
-
RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock_time) {
RRDHOST *host = st->rrdhost;
@@ -554,7 +511,7 @@ RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock
if(unlikely(host_flags & RRDHOST_FLAG_GLOBAL_FUNCTIONS_UPDATED)) {
BUFFER *wb = sender_start(host->sender);
- rrd_functions_expose_global_rrdpush(host, wb);
+ rrd_global_functions_expose_rrdpush(host, wb, stream_has_capability(host->sender, STREAM_CAP_DYNCFG));
sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS);
}
@@ -614,98 +571,13 @@ void rrdpush_send_global_functions(RRDHOST *host) {
BUFFER *wb = sender_start(host->sender);
- rrd_functions_expose_global_rrdpush(host, wb);
+ rrd_global_functions_expose_rrdpush(host, wb, stream_has_capability(host->sender, STREAM_CAP_DYNCFG));
sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS);
sender_thread_buffer_free();
}
-void rrdpush_send_dyncfg(RRDHOST *host) {
- dyncfg_check_can_push(host);
-
- BUFFER *wb = sender_start(host->sender);
-
- DICTIONARY *plugins_dict = host->configurable_plugins;
-
- struct configurable_plugin *plug;
- dfe_start_read(plugins_dict, plug) {
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DYNCFG_ENABLE " %s\n", plug->name);
- struct module *mod;
- dfe_start_read(plug->modules, mod) {
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE " %s %s %s\n", plug->name, mod->name, module_type2str(mod->type));
- struct job *job;
- dfe_start_read(mod->jobs, job) {
- pthread_mutex_lock(&job->lock);
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB " %s %s %s %s %"PRIu32"\n", plug->name, mod->name, job->name, job_type2str(job->type), job->flags);
- buffer_sprintf(wb, PLUGINSD_KEYWORD_REPORT_JOB_STATUS " %s %s %s %s %d", plug->name, mod->name, job->name, job_status2str(job->status), job->state);
- if (job->reason)
- buffer_sprintf(wb, " \"%s\"", job->reason);
- buffer_sprintf(wb, "\n");
- job->dirty = 0;
- pthread_mutex_unlock(&job->lock);
- } dfe_done(job);
- } dfe_done(mod);
- }
- dfe_done(plug);
-
- sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_DYNCFG);
-
- sender_thread_buffer_free();
-}
-
-void rrdpush_send_dyncfg_enable(RRDHOST *host, const char *plugin_name)
-{
- dyncfg_check_can_push(host);
-
- BUFFER *wb = sender_start(host->sender);
-
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DYNCFG_ENABLE " %s\n", plugin_name);
-
- sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
-
- sender_thread_buffer_free();
-}
-
-void rrdpush_send_dyncfg_reg_module(RRDHOST *host, const char *plugin_name, const char *module_name, enum module_type type)
-{
- dyncfg_check_can_push(host);
-
- BUFFER *wb = sender_start(host->sender);
-
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DYNCFG_REGISTER_MODULE " %s %s %s\n", plugin_name, module_name, module_type2str(type));
-
- sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_DYNCFG);
-
- sender_thread_buffer_free();
-}
-
-void rrdpush_send_dyncfg_reg_job(RRDHOST *host, const char *plugin_name, const char *module_name, const char *job_name, enum job_type type, uint32_t flags)
-{
- dyncfg_check_can_push(host);
-
- BUFFER *wb = sender_start(host->sender);
-
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DYNCFG_REGISTER_JOB " %s %s %s %s %"PRIu32"\n", plugin_name, module_name, job_name, job_type2str(type), flags);
-
- sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_DYNCFG);
-
- sender_thread_buffer_free();
-}
-
-void rrdpush_send_dyncfg_reset(RRDHOST *host, const char *plugin_name)
-{
- dyncfg_check_can_push(host);
-
- BUFFER *wb = sender_start(host->sender);
-
- buffer_sprintf(wb, PLUGINSD_KEYWORD_DYNCFG_RESET " %s\n", plugin_name);
-
- sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
-
- sender_thread_buffer_free();
-}
-
void rrdpush_send_claimed_id(RRDHOST *host) {
if(!stream_has_capability(host->sender, STREAM_CAP_CLAIM))
return;
@@ -1003,9 +875,6 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
else if(!strcmp(name, "mc_version"))
rpt->system_info->mc_version = strtoul(value, NULL, 0);
- else if(!strcmp(name, "tags") && !rpt->tags)
- rpt->tags = strdupz(value);
-
else if(!strcmp(name, "ver") && (rpt->capabilities & STREAM_CAP_INVALID))
rpt->capabilities = convert_stream_version_to_capabilities(strtoul(value, NULL, 0), NULL, false);
@@ -1423,6 +1292,7 @@ static struct {
{STREAM_CAP_ZSTD, "ZSTD" },
{STREAM_CAP_GZIP, "GZIP" },
{STREAM_CAP_BROTLI, "BROTLI" },
+ {STREAM_CAP_PROGRESS, "PROGRESS" },
{0 , NULL },
};
@@ -1499,10 +1369,9 @@ STREAM_CAPABILITIES stream_our_capabilities(RRDHOST *host, bool sender) {
STREAM_CAP_BINARY |
STREAM_CAP_INTERPOLATED |
STREAM_CAP_SLOTS |
+ STREAM_CAP_PROGRESS |
STREAM_CAP_COMPRESSIONS_AVAILABLE |
- #ifdef NETDATA_TEST_DYNCFG
STREAM_CAP_DYNCFG |
- #endif
STREAM_CAP_IEEE754 |
STREAM_CAP_DATA_WITH_ML |
0) & ~disabled_capabilities;
diff --git a/streaming/rrdpush.h b/src/streaming/rrdpush.h
index 1459c881e..c73877134 100644
--- a/streaming/rrdpush.h
+++ b/src/streaming/rrdpush.h
@@ -47,11 +47,13 @@ typedef enum {
STREAM_CAP_INTERPOLATED = (1 << 14), // streaming supports interpolated streaming of values
STREAM_CAP_IEEE754 = (1 << 15), // streaming supports binary/hex transfer of double values
STREAM_CAP_DATA_WITH_ML = (1 << 16), // streaming supports transferring anomaly bit
- STREAM_CAP_DYNCFG = (1 << 17), // dynamic configuration of plugins trough streaming
+ // STREAM_CAP_DYNCFG = (1 << 17), // leave this unused for as long as possible
STREAM_CAP_SLOTS = (1 << 18), // the sender can appoint a unique slot for each chart
STREAM_CAP_ZSTD = (1 << 19), // ZSTD compression supported
STREAM_CAP_GZIP = (1 << 20), // GZIP compression supported
STREAM_CAP_BROTLI = (1 << 21), // BROTLI compression supported
+ STREAM_CAP_PROGRESS = (1 << 22), // Functions PROGRESS support
+ STREAM_CAP_DYNCFG = (1 << 23), // support for DYNCFG
STREAM_CAP_INVALID = (1 << 30), // used as an invalid value for capabilities when this is set
// this must be signed int, so don't use the last bit
@@ -197,13 +199,6 @@ typedef enum __attribute__((packed)) {
SENDER_FLAG_OVERFLOW = (1 << 0), // The buffer has been overflown
} SENDER_FLAGS;
-struct function_payload_state {
- BUFFER *payload;
- char *txid;
- char *fn_name;
- char *timeout;
-};
-
struct sender_state {
RRDHOST *host;
pid_t tid; // the thread id of the sender, from gettid()
@@ -234,9 +229,6 @@ struct sender_state {
int rrdpush_sender_pipe[2]; // collector to sender thread signaling
int rrdpush_sender_socket;
- int receiving_function_payload;
- struct function_payload_state function_payload; // state when receiving function with payload
-
uint16_t hops;
struct line_splitter line;
@@ -275,6 +267,16 @@ struct sender_state {
time_t last_buffer_recreate_s; // true when the sender buffer should be re-created
} atomic;
+ struct {
+ bool intercept_input;
+ const char *transaction;
+ const char *timeout_s;
+ const char *function;
+ const char *access;
+ const char *source;
+ BUFFER *payload;
+ } functions;
+
int parent_using_h2o;
};
@@ -345,7 +347,6 @@ struct receiver_state {
char *timezone; // Unused?
char *abbrev_timezone;
int32_t utc_offset;
- char *tags;
char *client_ip; // Duplicated in pluginsd
char *client_port; // Duplicated in pluginsd
char *program_name; // Duplicated in pluginsd
@@ -456,10 +457,6 @@ void *rrdpush_sender_thread(void *ptr);
void rrdpush_send_host_labels(RRDHOST *host);
void rrdpush_send_claimed_id(RRDHOST *host);
void rrdpush_send_global_functions(RRDHOST *host);
-void rrdpush_send_dyncfg(RRDHOST *host);
-
-#define THREAD_TAG_STREAM_RECEIVER "RCVR" // "[host]" is appended
-#define THREAD_TAG_STREAM_SENDER "SNDR" // "[host]" is appended
int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_string, void *h2o_ctx);
void rrdpush_sender_thread_stop(RRDHOST *host, STREAM_HANDSHAKE reason, bool wait);
@@ -661,11 +658,31 @@ static inline const char *rrdhost_health_status_to_string(RRDHOST_HEALTH_STATUS
}
}
+typedef enum __attribute__((packed)) {
+ RRDHOST_DYNCFG_STATUS_UNAVAILABLE = 0,
+ RRDHOST_DYNCFG_STATUS_AVAILABLE,
+} RRDHOST_DYNCFG_STATUS;
+
+static inline const char *rrdhost_dyncfg_status_to_string(RRDHOST_DYNCFG_STATUS status) {
+ switch(status) {
+ default:
+ case RRDHOST_DYNCFG_STATUS_UNAVAILABLE:
+ return "unavailable";
+
+ case RRDHOST_DYNCFG_STATUS_AVAILABLE:
+ return "online";
+ }
+}
+
typedef struct rrdhost_status {
RRDHOST *host;
time_t now;
struct {
+ RRDHOST_DYNCFG_STATUS status;
+ } dyncfg;
+
+ struct {
RRDHOST_DB_STATUS status;
RRDHOST_DB_LIVENESS liveness;
RRD_MEMORY_MODE mode;
@@ -735,14 +752,6 @@ typedef struct rrdhost_status {
void rrdhost_status(RRDHOST *host, time_t now, RRDHOST_STATUS *s);
bool rrdhost_state_cloud_emulation(RRDHOST *host);
-void rrdpush_send_job_status_update(RRDHOST *host, const char *plugin_name, const char *module_name, struct job *job);
-void rrdpush_send_job_deleted(RRDHOST *host, const char *plugin_name, const char *module_name, const char *job_name);
-
-void rrdpush_send_dyncfg_enable(RRDHOST *host, const char *plugin_name);
-void rrdpush_send_dyncfg_reg_module(RRDHOST *host, const char *plugin_name, const char *module_name, enum module_type type);
-void rrdpush_send_dyncfg_reg_job(RRDHOST *host, const char *plugin_name, const char *module_name, const char *job_name, enum job_type type, uint32_t flags);
-void rrdpush_send_dyncfg_reset(RRDHOST *host, const char *plugin_name);
-
bool rrdpush_compression_initialize(struct sender_state *s);
bool rrdpush_decompression_initialize(struct receiver_state *rpt);
void rrdpush_parse_compression_order(struct receiver_state *rpt, const char *order);
diff --git a/streaming/sender.c b/src/streaming/sender.c
index 09b67e968..bb617c5fd 100644
--- a/streaming/sender.c
+++ b/src/streaming/sender.c
@@ -234,11 +234,8 @@ static int rrdpush_sender_thread_custom_host_variables_callback(const DICTIONARY
struct custom_host_variables_callback *tmp = struct_ptr;
BUFFER *wb = tmp->wb;
- if(unlikely(rrdvar_flags(rv) & RRDVAR_FLAG_CUSTOM_HOST_VAR && rrdvar_type(rv) == RRDVAR_TYPE_CALCULATED)) {
- rrdpush_sender_add_host_variable_to_buffer(wb, rv);
- return 1;
- }
- return 0;
+ rrdpush_sender_add_host_variable_to_buffer(wb, rv);
+ return 1;
}
static void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host) {
@@ -789,7 +786,6 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
"&ml_capable=%d"
"&ml_enabled=%d"
"&mc_version=%d"
- "&tags=%s"
"&ver=%u"
"&NETDATA_INSTANCE_CLOUD_TYPE=%s"
"&NETDATA_INSTANCE_CLOUD_INSTANCE_TYPE=%s"
@@ -835,7 +831,6 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
, host->system_info->ml_capable
, host->system_info->ml_enabled
, host->system_info->mc_version
- , rrdhost_tags(host)
, s->capabilities
, (host->system_info->cloud_provider_type) ? host->system_info->cloud_provider_type : ""
, (host->system_info->cloud_instance_type) ? host->system_info->cloud_instance_type : ""
@@ -952,6 +947,7 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
nd_log(NDLS_DAEMON, NDLP_WARNING,
"STREAM %s [send to %s]: cannot set non-blocking mode for socket.",
rrdhost_hostname(host), s->connected_to);
+ sock_setcloexec(s->rrdpush_sender_socket);
if(sock_enlarge_out(s->rrdpush_sender_socket) < 0)
nd_log(NDLS_DAEMON, NDLP_WARNING,
@@ -1118,9 +1114,8 @@ struct inflight_stream_function {
usec_t received_ut;
};
-void stream_execute_function_callback(BUFFER *func_wb, int code, void *data) {
+static void stream_execute_function_callback(BUFFER *func_wb, int code, void *data) {
struct inflight_stream_function *tmp = data;
-
struct sender_state *s = tmp->sender;
if(rrdhost_can_send_definitions_to_parent(s->host)) {
@@ -1129,7 +1124,7 @@ void stream_execute_function_callback(BUFFER *func_wb, int code, void *data) {
pluginsd_function_result_begin_to_buffer(wb
, string2str(tmp->transaction)
, code
- , functions_content_type_to_format(func_wb->content_type)
+ , content_type_id2string(func_wb->content_type)
, func_wb->expires);
buffer_fast_strcat(wb, buffer_tostring(func_wb), buffer_strlen(func_wb));
@@ -1150,6 +1145,77 @@ void stream_execute_function_callback(BUFFER *func_wb, int code, void *data) {
freez(tmp);
}
+static void stream_execute_function_progress_callback(void *data, size_t done, size_t all) {
+ struct inflight_stream_function *tmp = data;
+ struct sender_state *s = tmp->sender;
+
+ if(rrdhost_can_send_definitions_to_parent(s->host)) {
+ BUFFER *wb = sender_start(s);
+
+ buffer_sprintf(wb, PLUGINSD_KEYWORD_FUNCTION_PROGRESS " '%s' %zu %zu\n",
+ string2str(tmp->transaction), done, all);
+
+ sender_commit(s, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS);
+ }
+}
+
+static void execute_commands_function(struct sender_state *s, const char *command, const char *transaction, const char *timeout_s, const char *function, BUFFER *payload, const char *access, const char *source) {
+ worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST);
+ nd_log(NDLS_ACCESS, NDLP_INFO, NULL);
+
+ if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) {
+ netdata_log_error("STREAM %s [send to %s] %s execution command is incomplete (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.",
+ rrdhost_hostname(s->host), s->connected_to,
+ command,
+ transaction?transaction:"(unset)",
+ timeout_s?timeout_s:"(unset)",
+ function?function:"(unset)");
+ }
+ else {
+ int timeout = str2i(timeout_s);
+ if(timeout <= 0) timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT;
+
+ struct inflight_stream_function *tmp = callocz(1, sizeof(struct inflight_stream_function));
+ tmp->received_ut = now_realtime_usec();
+ tmp->sender = s;
+ tmp->transaction = string_strdupz(transaction);
+ BUFFER *wb = buffer_create(1024, &netdata_buffers_statistics.buffers_functions);
+
+ int code = rrd_function_run(s->host, wb, timeout,
+ http_access_from_hex_mapping_old_roles(access), function, false, transaction,
+ stream_execute_function_callback, tmp,
+ stream_has_capability(s, STREAM_CAP_PROGRESS) ? stream_execute_function_progress_callback : NULL,
+ stream_has_capability(s, STREAM_CAP_PROGRESS) ? tmp : NULL,
+ NULL, NULL, payload, source);
+
+ if(code != HTTP_RESP_OK) {
+ if (!buffer_strlen(wb))
+ rrd_call_function_error(wb, "Failed to route request to collector", code);
+ }
+ }
+}
+
+static void cleanup_intercepting_input(struct sender_state *s) {
+ freez((void *)s->functions.transaction);
+ freez((void *)s->functions.timeout_s);
+ freez((void *)s->functions.function);
+ freez((void *)s->functions.access);
+ freez((void *)s->functions.source);
+ buffer_free(s->functions.payload);
+
+ s->functions.transaction = NULL;
+ s->functions.timeout_s = NULL;
+ s->functions.function = NULL;
+ s->functions.payload = NULL;
+ s->functions.access = NULL;
+ s->functions.source = NULL;
+ s->functions.intercept_input = false;
+}
+
+static void execute_commands_cleanup(struct sender_state *s) {
+ cleanup_intercepting_input(s);
+}
+
// This is just a placeholder until the gap filling state machine is inserted
void execute_commands(struct sender_state *s) {
worker_is_busy(WORKER_SENDER_JOB_EXECUTE);
@@ -1163,109 +1229,70 @@ void execute_commands(struct sender_state *s) {
char *start = s->read_buffer, *end = &s->read_buffer[s->read_len], *newline;
*end = 0;
while( start < end && (newline = strchr(start, '\n')) ) {
- *newline = '\0';
+ s->line.count++;
+
+ if(s->functions.intercept_input) {
+ if(strcmp(start, PLUGINSD_CALL_FUNCTION_PAYLOAD_END "\n") == 0) {
+ execute_commands_function(s,
+ PLUGINSD_CALL_FUNCTION_PAYLOAD_END,
+ s->functions.transaction, s->functions.timeout_s,
+ s->functions.function, s->functions.payload,
+ s->functions.access, s->functions.source);
+
+ cleanup_intercepting_input(s);
+ }
+ else
+ buffer_strcat(s->functions.payload, start);
- if (s->receiving_function_payload && unlikely(strcmp(start, PLUGINSD_KEYWORD_FUNCTION_PAYLOAD_END) != 0)) {
- if (buffer_strlen(s->function_payload.payload) != 0)
- buffer_strcat(s->function_payload.payload, "\n");
- buffer_strcat(s->function_payload.payload, start);
start = newline + 1;
continue;
}
- s->line.count++;
+ *newline = '\0';
s->line.num_words = quoted_strings_splitter_pluginsd(start, s->line.words, PLUGINSD_MAX_WORDS);
const char *command = get_word(s->line.words, s->line.num_words, 0);
- if(command && (strcmp(command, PLUGINSD_KEYWORD_FUNCTION) == 0 || strcmp(command, PLUGINSD_KEYWORD_FUNCTION_PAYLOAD_END) == 0)) {
- worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST);
- nd_log(NDLS_ACCESS, NDLP_INFO, NULL);
-
- char *transaction = s->receiving_function_payload ? s->function_payload.txid : get_word(s->line.words, s->line.num_words, 1);
- char *timeout_s = s->receiving_function_payload ? s->function_payload.timeout : get_word(s->line.words, s->line.num_words, 2);
- char *function = s->receiving_function_payload ? s->function_payload.fn_name : get_word(s->line.words, s->line.num_words, 3);
-
- if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) {
- netdata_log_error("STREAM %s [send to %s] %s execution command is incomplete (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.",
- rrdhost_hostname(s->host), s->connected_to,
- command,
- transaction?transaction:"(unset)",
- timeout_s?timeout_s:"(unset)",
- function?function:"(unset)");
- }
- else {
- int timeout = str2i(timeout_s);
- if(timeout <= 0) timeout = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT;
-
- struct inflight_stream_function *tmp = callocz(1, sizeof(struct inflight_stream_function));
- tmp->received_ut = now_realtime_usec();
- tmp->sender = s;
- tmp->transaction = string_strdupz(transaction);
- BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX + 1, &netdata_buffers_statistics.buffers_functions);
-
- char *payload = s->receiving_function_payload ? (char *)buffer_tostring(s->function_payload.payload) : NULL;
- int code = rrd_function_run(s->host, wb, timeout, function, false, transaction,
- stream_execute_function_callback, tmp, NULL, NULL, payload);
-
- if(code != HTTP_RESP_OK) {
- if (!buffer_strlen(wb))
- rrd_call_function_error(wb, "Failed to route request to collector", code);
-
- stream_execute_function_callback(wb, code, tmp);
- }
- }
-
- if (s->receiving_function_payload) {
- s->receiving_function_payload = false;
-
- buffer_free(s->function_payload.payload);
- freez(s->function_payload.txid);
- freez(s->function_payload.timeout);
- freez(s->function_payload.fn_name);
+ if(command && strcmp(command, PLUGINSD_CALL_FUNCTION) == 0) {
+ char *transaction = get_word(s->line.words, s->line.num_words, 1);
+ char *timeout_s = get_word(s->line.words, s->line.num_words, 2);
+ char *function = get_word(s->line.words, s->line.num_words, 3);
+ char *access = get_word(s->line.words, s->line.num_words, 4);
+ char *source = get_word(s->line.words, s->line.num_words, 5);
- memset(&s->function_payload, 0, sizeof(struct function_payload_state));
- }
+ execute_commands_function(s, command, transaction, timeout_s, function, NULL, access, source);
}
- else if (command && strcmp(command, PLUGINSD_KEYWORD_FUNCTION_PAYLOAD) == 0) {
- nd_log(NDLS_ACCESS, NDLP_INFO, NULL);
-
- if (s->receiving_function_payload) {
- netdata_log_error("STREAM %s [send to %s] received %s command while already receiving function payload",
- rrdhost_hostname(s->host), s->connected_to, command);
- s->receiving_function_payload = false;
- buffer_free(s->function_payload.payload);
- s->function_payload.payload = NULL;
-
- // TODO send error response
- }
+ else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN) == 0) {
+ char *transaction = get_word(s->line.words, s->line.num_words, 1);
+ char *timeout_s = get_word(s->line.words, s->line.num_words, 2);
+ char *function = get_word(s->line.words, s->line.num_words, 3);
+ char *access = get_word(s->line.words, s->line.num_words, 4);
+ char *source = get_word(s->line.words, s->line.num_words, 5);
+ char *content_type = get_word(s->line.words, s->line.num_words, 6);
+
+ s->functions.transaction = strdupz(transaction ? transaction : "");
+ s->functions.timeout_s = strdupz(timeout_s ? timeout_s : "");
+ s->functions.function = strdupz(function ? function : "");
+ s->functions.access = strdupz(access ? access : "");
+ s->functions.source = strdupz(source ? source : "");
+ s->functions.payload = buffer_create(0, NULL);
+ s->functions.payload->content_type = content_type_string2id(content_type);
+ s->functions.intercept_input = true;
+ }
+ else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_CANCEL) == 0) {
+ worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST);
+ nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL);
char *transaction = get_word(s->line.words, s->line.num_words, 1);
- char *timeout_s = get_word(s->line.words, s->line.num_words, 2);
- char *function = get_word(s->line.words, s->line.num_words, 3);
-
- if(!transaction || !*transaction || !timeout_s || !*timeout_s || !function || !*function) {
- netdata_log_error("STREAM %s [send to %s] %s execution command is incomplete (transaction = '%s', timeout = '%s', function = '%s'). Ignoring it.",
- rrdhost_hostname(s->host), s->connected_to,
- command,
- transaction?transaction:"(unset)",
- timeout_s?timeout_s:"(unset)",
- function?function:"(unset)");
- }
-
- s->receiving_function_payload = true;
- s->function_payload.payload = buffer_create(4096, &netdata_buffers_statistics.buffers_functions);
-
- s->function_payload.txid = strdupz(get_word(s->line.words, s->line.num_words, 1));
- s->function_payload.timeout = strdupz(get_word(s->line.words, s->line.num_words, 2));
- s->function_payload.fn_name = strdupz(get_word(s->line.words, s->line.num_words, 3));
+ if(transaction && *transaction)
+ rrd_function_cancel(transaction);
}
- else if(command && strcmp(command, PLUGINSD_KEYWORD_FUNCTION_CANCEL) == 0) {
+ else if(command && strcmp(command, PLUGINSD_CALL_FUNCTION_PROGRESS) == 0) {
worker_is_busy(WORKER_SENDER_JOB_FUNCTION_REQUEST);
nd_log(NDLS_ACCESS, NDLP_DEBUG, NULL);
char *transaction = get_word(s->line.words, s->line.num_words, 1);
if(transaction && *transaction)
- rrd_function_cancel(transaction);
+ rrd_function_progress(transaction);
}
else if (command && strcmp(command, PLUGINSD_KEYWORD_REPLAY_CHART) == 0) {
worker_is_busy(WORKER_SENDER_JOB_REPLAY_REQUEST);
@@ -1455,6 +1482,7 @@ static void rrdpush_sender_thread_cleanup_callback(void *ptr) {
rrdpush_sender_thread_close_socket(host);
rrdpush_sender_pipe_close(host, host->sender->rrdpush_sender_pipe, false);
+ execute_commands_cleanup(host->sender);
rrdhost_clear_sender___while_having_sender_mutex(host);
@@ -1655,6 +1683,7 @@ void *rrdpush_sender_thread(void *ptr) {
now_s = now_monotonic_sec();
rrdpush_sender_cbuffer_recreate_timed(s, now_s, false, true);
+ execute_commands_cleanup(s);
rrdhost_flag_clear(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS);
s->flags &= ~SENDER_FLAG_OVERFLOW;
@@ -1672,7 +1701,6 @@ void *rrdpush_sender_thread(void *ptr) {
rrdpush_send_claimed_id(s->host);
rrdpush_send_host_labels(s->host);
rrdpush_send_global_functions(s->host);
- rrdpush_send_dyncfg(s->host);
s->replication.oldest_request_after_t = 0;
rrdhost_flag_set(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS);
diff --git a/streaming/stream.conf b/src/streaming/stream.conf
index 36213af02..9dc154e2f 100644
--- a/streaming/stream.conf
+++ b/src/streaming/stream.conf
@@ -135,8 +135,6 @@
# You can also set it per host below.
# If you don't set it here, the memory mode of netdata.conf will be used.
# Valid modes:
- # save save on exit, load on start
- # map like swap (continuously syncing to disks - you need SSD)
# ram keep it in RAM, don't touch the disk
# none no database at all (use this on headless proxies)
# dbengine like a traditional database
@@ -181,12 +179,12 @@
#seconds to replicate = 86400
# The duration we want to replicate per each step.
- #replication_step = 600
+ #seconds per replication step = 600
# Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable
# after the specified duration of "cleanup ephemeral hosts after secs" (as defined in the db section of netdata.conf)
# from the time of the node's last connection.
- #is ephemeral node = false
+ #is ephemeral node = no
# -----------------------------------------------------------------------------
# 3. PER SENDING HOST SETTINGS, ON PARENT NETDATA
@@ -225,7 +223,7 @@
# This is ignored for db mode dbengine.
#history = 3600
- # The memory mode of the database: save | map | ram | none | dbengine
+ # The memory mode of the database: ram | none | dbengine
#memory mode = dbengine
# Health / alarms control: yes | no | auto
@@ -257,9 +255,9 @@
#seconds to replicate = 86400
# The duration we want to replicate per each step.
- #replication_step = 600
+ #seconds per replication step = 600
# Indicate whether this child is an ephemeral node. An ephemeral node will become unavailable
# after the specified duration of "cleanup ephemeral hosts after secs" (as defined in the db section of netdata.conf)
# from the time of the node's last connection.
- #is ephemeral node = false
+ #is ephemeral node = no
diff --git a/streaming/Makefile.am b/streaming/Makefile.am
deleted file mode 100644
index 95c31cab0..000000000
--- a/streaming/Makefile.am
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_libconfig_DATA = \
- stream.conf \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/streaming/README.md b/streaming/README.md
deleted file mode 100644
index 03de090e0..000000000
--- a/streaming/README.md
+++ /dev/null
@@ -1,580 +0,0 @@
-# Streaming and replication reference
-
-This document contains advanced streaming options and suggested deployment options for production.
-If you haven't already done so, we suggest you first go through the
-[quick introduction to streaming](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/enable-streaming.md)
-, for your first, basic parent child setup.
-
-## Configuration
-
-There are two files responsible for configuring Netdata's streaming capabilities: `stream.conf` and `netdata.conf`.
-
-From within your Netdata config directory (typically `/etc/netdata`), [use `edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) to
-open either `stream.conf` or `netdata.conf`.
-
-```
-sudo ./edit-config stream.conf
-sudo ./edit-config netdata.conf
-```
-
-### `stream.conf`
-
-The `stream.conf` file contains three sections. The `[stream]` section is for configuring child nodes.
-
-The `[API_KEY]` and `[MACHINE_GUID]` sections are both for configuring parent nodes, and share the same settings.
-`[API_KEY]` settings affect every child node using that key, whereas `[MACHINE_GUID]` settings affect only the child
-node with a matching GUID.
-
-The file `/var/lib/netdata/registry/netdata.public.unique.id` contains a random GUID that **uniquely identifies each
-node**. This file is automatically generated by Netdata the first time it is started and remains unaltered forever.
-
-#### `[stream]` section
-
-| Setting | Default | Description |
-|-------------------------------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `enabled` | `no` | Whether this node streams metrics to any parent. Change to `yes` to enable streaming. |
-| [`destination`](#destination) | | A space-separated list of parent nodes to attempt to stream to, with the first available parent receiving metrics, using the following format: `[PROTOCOL:]HOST[%INTERFACE][:PORT][:SSL]`. [Read more &rarr;](#destination) |
-| `ssl skip certificate verification` | `yes` | If you want to accept self-signed or expired certificates, set to `yes` and uncomment. |
-| `CApath` | `/etc/ssl/certs/` | The directory where known certificates are found. Defaults to OpenSSL's default path. |
-| `CAfile` | `/etc/ssl/certs/cert.pem` | Add a parent node certificate to the list of known certificates in `CAPath`. |
-| `api key` | | The `API_KEY` to use as the child node. |
-| `timeout seconds` | `60` | The timeout to connect and send metrics to a parent. |
-| `default port` | `19999` | The port to use if `destination` does not specify one. |
-| [`send charts matching`](#send-charts-matching) | `*` | A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to filter which charts are streamed. [Read more &rarr;](#send-charts-matching) |
-| `buffer size bytes` | `10485760` | The size of the buffer to use when sending metrics. The default `10485760` equals a buffer of 10MB, which is good for 60 seconds of data. Increase this if you expect latencies higher than that. The buffer is flushed on reconnect. |
-| `reconnect delay seconds` | `5` | How long to wait until retrying to connect to the parent node. |
-| `initial clock resync iterations` | `60` | Sync the clock of charts for how many seconds when starting. |
-| `parent using h2o` | `no` | Set to `yes` if you are connecting to the parent through its h2o webserver/port. Currently there is no reason to set this to `yes` unless you are testing the new h2o based netdata webserver. When production ready, this will default to `yes`. |
-
-### `[API_KEY]` and `[MACHINE_GUID]` sections
-
-| Setting | Default | Description |
-|-----------------------------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `enabled` | `no` | Whether this API KEY enabled or disabled. |
-| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more &rarr;](#allow-from) |
-| `default history` | `3600` | The default amount of child metrics history to retain when using the `save`, `map`, or `ram` memory modes. |
-| [`default memory mode`](#default-memory-mode) | `ram` | The [database](https://github.com/netdata/netdata/blob/master/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `map`, `save`, `ram`, or `none`. [Read more &rarr;](#default-memory-mode) |
-| `health enabled by default` | `auto` | Whether alerts and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alerts when the child is connected. `yes` enables alerts always, and `no` disables alerts. |
-| `default postpone alarms on connect seconds` | `60` | Postpone alerts and notifications for a period of time after the child connects. |
-| `default health log history` | `432000` | History of health log events (in seconds) kept in the database. |
-| `default proxy enabled` | | Route metrics through a proxy. |
-| `default proxy destination` | | Space-separated list of `IP:PORT` for proxies. |
-| `default proxy api key` | | The `API_KEY` of the proxy. |
-| `default send charts matching` | `*` | See [`send charts matching`](#send-charts-matching). |
-
-#### `destination`
-
-A space-separated list of parent nodes to attempt to stream to, with the first available parent receiving metrics, using
-the following format: `[PROTOCOL:]HOST[%INTERFACE][:PORT][:SSL]`.
-
-- `PROTOCOL`: `tcp`, `udp`, or `unix`. (only tcp and unix are supported by parent nodes)
-- `HOST`: An IPv4 or IPv6 address, a hostname, or a unix domain socket path. IPv6 addresses should be enclosed
- in brackets: `[ip:address]`.
-- `INTERFACE` (IPv6 only): The network interface to use.
-- `PORT`: The port number or service name (`/etc/services`) to use.
-- `SSL`: To enable TLS/SSL encryption of the streaming connection.
-
-To enable TCP streaming to a parent node at `203.0.113.0` on port `20000` and with TLS/SSL encryption:
-
-```conf
-[stream]
- destination = tcp:203.0.113.0:20000:SSL
-```
-
-#### `send charts matching`
-
-A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to filter which charts are streamed.
-
-The default is a single wildcard `*`, which streams all charts.
-
-To send only a few charts, list them explicitly, or list a group using a wildcard. To send _only_ the `apps.cpu` chart
-and charts with contexts beginning with `system.`:
-
-```conf
-[stream]
- send charts matching = apps.cpu system.*
-```
-
-To send all but a few charts, use `!` to create a negative match. To send _all_ charts _but_ `apps.cpu`:
-
-```conf
-[stream]
- send charts matching = !apps.cpu *
-```
-
-#### `allow from`
-
-A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) matching the IPs of nodes that
-will stream metrics using this API key. The order is important, left to right, as the first positive or negative match is used.
-
-The default is `*`, which accepts all requests including the `API_KEY`.
-
-To allow from only a specific IP address:
-
-```conf
-[API_KEY]
- allow from = 203.0.113.10
-```
-
-To allow all IPs starting with `10.*`, except `10.1.2.3`:
-
-```conf
-[API_KEY]
- allow from = !10.1.2.3 10.*
-```
-
-> If you set specific IP addresses here, and also use the `allow connections` setting in the `[web]` section of
-> `netdata.conf`, be sure to add the IP address there so that it can access the API port.
-
-#### `default memory mode`
-
-The [database](https://github.com/netdata/netdata/blob/master/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`,
-`save`, `map`, or `none`.
-
-- `dbengine`: The default, recommended time-series database (TSDB) for Netdata. Stores recent metrics in memory, then
- efficiently spills them to disk for long-term storage.
-- `ram`: Stores metrics _only_ in memory, which means metrics are lost when Netdata stops or restarts. Ideal for
- streaming configurations that use ephemeral nodes.
-- `save`: Stores metrics in memory, but saves metrics to disk when Netdata stops or restarts, and loads historical
- metrics on start.
-- `map`: Stores metrics in memory-mapped files, like swap, with constant disk write.
-- `none`: No database.
-
-When using `default memory mode = dbengine`, the parent node creates a separate instance of the TSDB to store metrics
-from child nodes. The [size of _each_ instance is configurable](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) with the `page
-cache size` and `dbengine multihost disk space` settings in the `[global]` section in `netdata.conf`.
-
-### `netdata.conf`
-
-| Setting | Default | Description |
-|--------------------------------------------|-------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `[global]` section | | |
-| `memory mode` | `dbengine` | Determines the [database type](https://github.com/netdata/netdata/blob/master/database/README.md) to be used on that node. Other options settings include `none`, `ram`, `save`, and `map`. `none` disables the database at this host. This also disables alerts and notifications, as those can't run without a database. |
-| `[web]` section | | |
-| `mode` | `static-threaded` | Determines the [web server](https://github.com/netdata/netdata/blob/master/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. |
-| `accept a streaming request every seconds` | `0` | Set a limit on how often a parent node accepts streaming requests from child nodes. `0` equals no limit. If this is set, you may see `... too busy to accept new streaming request. Will be allowed in X secs` in Netdata's `error.log`. |
-
-### Basic use cases
-
-This is an overview of how the main options can be combined:
-
-| target | memory<br/>mode | web<br/>mode | stream<br/>enabled | exporting | alerts | dashboard |
-|--------------------|:---------------:|:------------:|:------------------:|:-------------------------------------:|:------------:|:---------:|
-| headless collector | `none` | `none` | `yes` | only for `data source = as collected` | not possible | no |
-| headless proxy | `none` | not `none` | `yes` | only for `data source = as collected` | not possible | no |
-| proxy with db | not `none` | not `none` | `yes` | possible | possible | yes |
-| central netdata | not `none` | not `none` | `no` | possible | possible | yes |
-
-### Per-child settings
-
-While the `[API_KEY]` section applies settings for any child node using that key, you can also use per-child settings
-with the `[MACHINE_GUID]` section.
-
-For example, the metrics streamed from only the child node with `MACHINE_GUID` are saved in memory, not using the
-default `dbengine` as specified by the `API_KEY`, and alerts are disabled.
-
-```conf
-[API_KEY]
- enabled = yes
- default memory mode = dbengine
- health enabled by default = auto
- allow from = *
-
-[MACHINE_GUID]
- enabled = yes
- memory mode = save
- health enabled = no
-```
-
-### Streaming compression
-
-[![Supported version Netdata Agent release](https://img.shields.io/badge/Supported%20Netdata%20Agent-v1.33%2B-brightgreen)](https://github.com/netdata/netdata/releases/latest)
-
-[![Supported version Netdata Agent release](https://img.shields.io/badge/Supported%20Netdata%20stream%20version-v5%2B-blue)](https://github.com/netdata/netdata/releases/latest)
-
-#### OS dependencies
-* Streaming compression is based on [lz4 v1.9.0+](https://github.com/lz4/lz4). The [lz4 v1.9.0+](https://github.com/lz4/lz4) library must be installed in your OS in order to enable streaming compression. Any lower version will disable Netdata streaming compression for compatibility purposes between the older versions of Netdata agents.
-
-To check if your Netdata Agent supports stream compression run the following GET request in your browser or terminal:
-
-```
-curl -X GET http://localhost:19999/api/v1/info | grep 'Stream Compression'
-```
-
-**Output**
-```
-"buildinfo": "dbengine|Native HTTPS|Netdata Cloud|ACLK Next Generation|New Cloud Protocol Support|ACLK Legacy|TLS Host Verification|Machine Learning|Stream Compression|protobuf|JSON-C|libcrypto|libm|LWS v3.2.2|mosquitto|zlib|apps|cgroup Network Tracking|EBPF|perf|slabinfo",
-```
-> Note: If your OS doesn't support Netdata compression the `buildinfo` will not contain the `Stream Compression` statement.
-
-To check if your Netdata Agent has stream compression enabled, run the following GET request in your browser or terminal:
-
-```
- curl -X GET http://localhost:19999/api/v1/info | grep 'stream-compression'
-```
-**Output**
-```
-"stream-compression": "enabled"
-```
-Note: The `stream-compression` status can be `"enabled" | "disabled" | "N/A"`.
-
-A compressed data packet is determined and decompressed on the fly.
-
-#### Limitations
-This limitation is a work in progress and will be removed as soon as possible.
-
-The current implementation of streaming data compression can support only a limited number of dimensions in a chart, with names that cannot exceed the size of 16384 bytes. In case your instance hits this limitation, the agent will deactivate compression during runtime to avoid stream corruption. This limitation can be seen in the error.log file with the sequence of the following messages:
-```
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: connecting...
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: initializing communication...
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: waiting response from remote netdata...
-netdata INFO : STREAM_SENDER[child01] : STREAM_COMPRESSION: Compressor Reset
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: established communication with a parent using protocol version 5 - ready to send metrics...
-...
-netdata ERROR : PLUGINSD[go.d] : STREAM_COMPRESSION: Compression Failed - Message size 27847 above compression buffer limit: 16384 (errno 9, Bad file descriptor)
-netdata ERROR : PLUGINSD[go.d] : STREAM_COMPRESSION: Deactivating compression to avoid stream corruption
-netdata ERROR : PLUGINSD[go.d] : STREAM_COMPRESSION child01 [send to my.parent.IP]: Restarting connection without compression
-...
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: connecting...
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: initializing communication...
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: waiting response from remote netdata...
-netdata INFO : STREAM_SENDER[child01] : Stream is uncompressed! One of the agents (my.parent.IP <-> child01) does not support compression OR compression is disabled.
-netdata INFO : STREAM_SENDER[child01] : STREAM child01 [send to my.parent.IP]: established communication with a parent using protocol version 4 - ready to send metrics...
-netdata INFO : WEB_SERVER[static4] : STREAM child01 [send]: sending metrics...
-```
-
-#### How to enable stream compression
-Netdata Agents are shipped with data compression enabled by default. You can also configure which streams will use compression.
-
-With enabled stream compression, a Netdata Agent can negotiate streaming compression with other Netdata Agents. During the negotiation of streaming compression both Netdata Agents should support and enable compression in order to communicate over a compressed stream. The negotiation will result into an uncompressed stream, if one of the Netdata Agents doesn't support **or** has compression disabled.
-
-To enable stream compression:
-
-1. Edit `stream.conf` by using the `edit-config` script:
-`/etc/netdata/edit-config stream.conf`.
-
-2. In the `[stream]` section, set `enable compression` to `yes`.
-```
-# This is the default stream compression flag for an agent.
-
-[stream]
- enable compression = yes | no
-```
-
-
-| Parent | Stream compression | Child |
-|--------------------------------------|--------------------|--------------------------------------|
-| Supported & Enabled | compressed | Supported & Enabled |
-| (Supported & Disabled)/Not supported | uncompressed | Supported & Enabled |
-| Supported & Enabled | uncompressed | (Supported & Disabled)/Not supported |
-| (Supported & Disabled)/Not supported | uncompressed | (Supported & Disabled)/Not supported |
-
-In case of parents with multiple children you can select which streams will be compressed by using the same configuration under the `[API_KEY]`, `[MACHINE_GUID]` section.
-
-This configuration uses AND logic with the default stream compression configuration under the `[stream]` section. This means the stream compression from child to parent will be enabled only if the outcome of the AND logic operation is true (`default compression enabled` && `api key compression enabled`). So both must be enabled to get stream compression; otherwise, stream compression is disabled.
-```
-[API_KEY]
- enable compression = yes | no
-```
-Same thing applies with the `[MACHINE_GUID]` configuration.
-```
-[MACHINE_GUID]
- enable compression = yes | no
-```
-
-### Securing streaming with TLS/SSL
-
-Netdata does not activate TLS encryption by default. To encrypt streaming connections, you first need to [enable TLS
-support](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support) on the parent. With encryption enabled on the receiving side, you
-need to instruct the child to use TLS/SSL as well. On the child's `stream.conf`, configure the destination as follows:
-
-```
-[stream]
- destination = host:port:SSL
-```
-
-The word `SSL` appended to the end of the destination tells the child that connections must be encrypted.
-
-> While Netdata uses Transport Layer Security (TLS) 1.2 to encrypt communications rather than the obsolete SSL protocol,
-> it's still common practice to refer to encrypted web connections as `SSL`. Many vendors, like Nginx and even Netdata
-> itself, use `SSL` in configuration files, whereas documentation will always refer to encrypted communications as `TLS`
-> or `TLS/SSL`.
-
-#### Certificate verification
-
-When TLS/SSL is enabled on the child, the default behavior will be to not connect with the parent unless the server's
-certificate can be verified via the default chain. In case you want to avoid this check, add the following to the
-child's `stream.conf` file:
-
-```
-[stream]
- ssl skip certificate verification = yes
-```
-
-#### Trusted certificate
-
-If you've enabled [certificate verification](#certificate-verification), you might see errors from the OpenSSL library
-when there's a problem with checking the certificate chain (`X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY`). More
-importantly, OpenSSL will reject self-signed certificates.
-
-Given these known issues, you have two options. If you trust your certificate, you can set the options `CApath` and
-`CAfile` to inform Netdata where your certificates, and the certificate trusted file, are stored.
-
-For more details about these options, you can read about [verify
-locations](https://www.openssl.org/docs/man1.1.1/man3/SSL_CTX_load_verify_locations.html).
-
-Before you changed your streaming configuration, you need to copy your trusted certificate to your child system and add
-the certificate to OpenSSL's list.
-
-On most Linux distributions, the `update-ca-certificates` command searches inside the `/usr/share/ca-certificates`
-directory for certificates. You should double-check by reading the `update-ca-certificate` manual (`man
-update-ca-certificate`), and then change the directory in the below commands if needed.
-
-If you have `sudo` configured on your child system, you can use that to run the following commands. If not, you'll have
-to log in as `root` to complete them.
-
-```
-# mkdir /usr/share/ca-certificates/netdata
-# cp parent_cert.pem /usr/share/ca-certificates/netdata/parent_cert.crt
-# chown -R netdata.netdata /usr/share/ca-certificates/netdata/
-```
-
-First, you create a new directory to store your certificates for Netdata. Next, you need to change the extension on your
-certificate from `.pem` to `.crt` so it's compatible with `update-ca-certificate`. Finally, you need to change
-permissions so the user that runs Netdata can access the directory where you copied in your certificate.
-
-Next, edit the file `/etc/ca-certificates.conf` and add the following line:
-
-```
-netdata/parent_cert.crt
-```
-
-Now you update the list of certificates running the following, again either as `sudo` or `root`:
-
-```
-# update-ca-certificates
-```
-
-> Some Linux distributions have different methods of updating the certificate list. For more details, please read this
-> guide on [adding trusted root certificates](https://github.com/Busindre/How-to-Add-trusted-root-certificates).
-
-Once you update your certificate list, you can set the stream parameters for Netdata to trust the parent certificate.
-Open `stream.conf` for editing and change the following lines:
-
-```
-[stream]
- CApath = /etc/ssl/certs/
- CAfile = /etc/ssl/certs/parent_cert.pem
-```
-
-With this configuration, the `CApath` option tells Netdata to search for trusted certificates inside `/etc/ssl/certs`.
-The `CAfile` option specifies the Netdata parent certificate is located at `/etc/ssl/certs/parent_cert.pem`. With this
-configuration, you can skip using the system's entire list of certificates and use Netdata's parent certificate instead.
-
-#### Expected behaviors
-
-With the introduction of TLS/SSL, the parent-child communication behaves as shown in the table below, depending on the
-following configurations:
-
-- **Parent TLS (Yes/No)**: Whether the `[web]` section in `netdata.conf` has `ssl key` and `ssl certificate`.
-- **Parent port TLS (-/force/optional)**: Depends on whether the `[web]` section `bind to` contains a `^SSL=force` or
- `^SSL=optional` directive on the port(s) used for streaming.
-- **Child TLS (Yes/No)**: Whether the destination in the child's `stream.conf` has `:SSL` at the end.
-- **Child TLS Verification (yes/no)**: Value of the child's `stream.conf` `ssl skip certificate verification`
- parameter (default is no).
-
-| Parent TLS enabled | Parent port SSL | Child TLS | Child SSL Ver. | Behavior |
-|:-------------------|:-----------------|:----------|:---------------|:-----------------------------------------------------------------------------------------------------------------------------------------|
-| No | - | No | no | Legacy behavior. The parent-child stream is unencrypted. |
-| Yes | force | No | no | The parent rejects the child connection. |
-| Yes | -/optional | No | no | The parent-child stream is unencrypted (expected situation for legacy child nodes and newer parent nodes) |
-| Yes | -/force/optional | Yes | no | The parent-child stream is encrypted, provided that the parent has a valid TLS/SSL certificate. Otherwise, the child refuses to connect. |
-| Yes | -/force/optional | Yes | yes | The parent-child stream is encrypted. |
-
-### Proxy
-
-A proxy is a node that receives metrics from a child, then streams them onward to a parent. To configure a proxy,
-configure it as a receiving and a sending Netdata at the same time.
-
-Netdata proxies may or may not maintain a database for the metrics passing through them. When they maintain a database,
-they can also run health checks (alerts and notifications) for the remote host that is streaming the metrics.
-
-In the following example, the proxy receives metrics from a child node using the `API_KEY` of
-`66666666-7777-8888-9999-000000000000`, then stores metrics using `dbengine`. It then uses the `API_KEY` of
-`11111111-2222-3333-4444-555555555555` to proxy those same metrics on to a parent node at `203.0.113.0`.
-
-```conf
-[stream]
- enabled = yes
- destination = 203.0.113.0
- api key = 11111111-2222-3333-4444-555555555555
-
-[66666666-7777-8888-9999-000000000000]
- enabled = yes
- default memory mode = dbengine
-```
-
-### Ephemeral nodes
-
-Netdata can help you monitor ephemeral nodes, such as containers in an auto-scaling infrastructure, by always streaming
-metrics to any number of permanently-running parent nodes.
-
-On the parent, set the following in `stream.conf`:
-
-```conf
-[11111111-2222-3333-4444-555555555555]
- # enable/disable this API key
- enabled = yes
-
- # one hour of data for each of the child nodes
- default history = 3600
-
-    # do not save child metrics on disk
-    default memory mode = ram
-
- # alerts checks, only while the child is connected
- health enabled by default = auto
-```
-
-On the child nodes, set the following in `stream.conf`:
-
-```bash
-[stream]
- # stream metrics to another Netdata
- enabled = yes
-
- # the IP and PORT of the parent
- destination = 10.11.12.13:19999
-
- # the API key to use
- api key = 11111111-2222-3333-4444-555555555555
-```
-
-In addition, edit `netdata.conf` on each child node to disable the database and alerts.
-
-```bash
-[global]
- # disable the local database
- memory mode = none
-
-[health]
- # disable health checks
- enabled = no
-```
-
-## Troubleshooting
-
-Both parent and child nodes log information at `/var/log/netdata/error.log`.
-
-If the child manages to connect to the parent you will see something like (on the parent):
-
-```
-2017-03-09 09:38:52: netdata: INFO : STREAM [receive from [10.11.12.86]:38564]: new client connection.
-2017-03-09 09:38:52: netdata: INFO : STREAM xxx [10.11.12.86]:38564: receive thread created (task id 27721)
-2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: client willing to stream metrics for host 'xxx' with machine_guid '1234567-1976-11e6-ae19-7cdd9077342a': update every = 1, history = 3600, memory mode = ram, health auto
-2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: initializing communication...
-2017-03-09 09:38:52: netdata: INFO : STREAM xxx [receive from [10.11.12.86]:38564]: receiving metrics...
-```
-
-and something like this on the child:
-
-```
-2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: connecting...
-2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: initializing communication...
-2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: waiting response from remote netdata...
-2017-03-09 09:38:28: netdata: INFO : STREAM xxx [send to box:19999]: established communication - sending metrics...
-```
-
-The following sections describe the most common issues you might encounter when connecting parent and child nodes.
-
-### Slow connections between parent and child
-
-When you have a slow connection between parent and child, Netdata raises a few different errors. Most of the
-errors will appear in the child's `error.log`.
-
-```bash
-netdata ERROR : STREAM_SENDER[CHILD HOSTNAME] : STREAM CHILD HOSTNAME [send to PARENT IP:PARENT PORT]: too many data pending - buffer is X bytes long,
-Y unsent - we have sent Z bytes in total, W on this connection. Closing connection to flush the data.
-```
-
-On the parent side, you may see various error messages, most commonly the following:
-
-```
-netdata ERROR : STREAM_PARENT[CHILD HOSTNAME,[CHILD IP]:CHILD PORT] : read failed: end of file
-```
-
-Another common problem in slow connections is the child sending a partial message to the parent. In this case, the
-parent will write the following to its `error.log`:
-
-```
-ERROR : STREAM_RECEIVER[CHILD HOSTNAME,[CHILD IP]:CHILD PORT] : sent command 'B' which is not known by netdata, for host 'HOSTNAME'. Disabling it.
-```
-
-In this example, `B` was part of a `BEGIN` message that was cut due to connection problems.
-
-Slow connections can also cause problems when the parent misses a message and then receives a command related to the
-missed message. For example, a parent might miss a message containing the child's charts, and then doesn't know
-what to do with the `SET` message that follows. When that happens, the parent will show a message like this:
-
-```
-ERROR : STREAM_RECEIVER[CHILD HOSTNAME,[CHILD IP]:CHILD PORT] : requested a SET on chart 'CHART NAME' of host 'HOSTNAME', without a dimension. Disabling it.
-```
-
-### Child cannot connect to parent
-
-When the child can't connect to a parent for any reason (misconfiguration, networking, firewalls, parent
-down), you will see the following in the child's `error.log`.
-
-```
-ERROR : STREAM_SENDER[HOSTNAME] : Failed to connect to 'PARENT IP', port 'PARENT PORT' (errno 113, No route to host)
-```
-
-### 'Is this a Netdata?'
-
-This question can appear when Netdata starts the stream and receives an unexpected response. This error can appear when
-the parent is using SSL and the child tries to connect using plain text. You will also see this message when
-Netdata connects to another server that isn't Netdata. The complete error message will look like this:
-
-```
-ERROR : STREAM_SENDER[CHILD HOSTNAME] : STREAM child HOSTNAME [send to PARENT HOSTNAME:PARENT PORT]: server is not replying properly (is it a netdata?).
-```
-
-### Stream charts wrong
-
-Chart data needs to be consistent between child and parent nodes. If there are differences between chart data on
-a parent and a child, such as gaps in metrics collection, it most often means your child's `memory mode`
-does not match the parent's. To learn more about the different ways Netdata can store metrics, and thus keep chart
-data consistent, read our [memory mode documentation](https://github.com/netdata/netdata/blob/master/database/README.md).
-
-### Forbidding access
-
-You may see errors about "forbidding access" for a number of reasons. It could be because of a slow connection between
-the parent and child nodes, but it could also be due to other failures. Look in your parent's `error.log` for errors
-that look like this:
-
-```
-STREAM [receive from [child HOSTNAME]:child IP]: `MESSAGE`. Forbidding access.
-```
-
-`MESSAGE` will have one of the following patterns:
-
-- `request without KEY` : The message received is incomplete and the KEY value can be API, hostname, machine GUID.
-- `API key 'VALUE' is not valid GUID`: The UUID received from the child does not have the format defined in [RFC
-  4122](https://tools.ietf.org/html/rfc4122).
-- `machine GUID 'VALUE' is not GUID.`: Same as the previous error, but for the machine GUID instead of the API key.
-- `API key 'VALUE' is not allowed`: This stream has a wrong API key.
-- `API key 'VALUE' is not permitted from this IP`: The IP is not allowed to use STREAM with this parent.
-- `machine GUID 'VALUE' is not allowed.`: The GUID that is trying to send stream is not allowed.
-- `Machine GUID 'VALUE' is not permitted from this IP.`: The IP does not match the pattern or IP allowed to connect
-  using stream.
-
-### Netdata could not create a stream
-
-The connection between parent and child is a stream. When the parent can't convert the initial connection into
-a stream, it will write the following message inside `error.log`:
-
-```
-file descriptor given is not a valid stream
-```
-
-After logging this error, Netdata will close the stream.