author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 11:19:16 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-05 12:07:37 +0000
commit    b485aab7e71c1625cfc27e0f92c9509f42378458 (patch)
tree      ae9abe108601079d1679194de237c9a435ae5b55 /web/api
parent    Adding upstream version 1.44.3. (diff)
Adding upstream version 1.45.3+dfsg. (upstream/1.45.3+dfsg, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'web/api')
-rw-r--r--  src/libnetdata/functions_evloop/README.md (renamed from web/api/ilove/README.md)  0
-rw-r--r--  src/web/api/badges/web_buffer_svg.c (renamed from web/api/badges/web_buffer_svg.c)  6
-rw-r--r--  src/web/api/badges/web_buffer_svg.h (renamed from web/api/badges/web_buffer_svg.h)  0
-rw-r--r--  src/web/api/exporters/allmetrics.c (renamed from web/api/exporters/allmetrics.c)  0
-rw-r--r--  src/web/api/exporters/allmetrics.h (renamed from web/api/exporters/allmetrics.h)  0
-rw-r--r--  src/web/api/exporters/shell/allmetrics_shell.c (renamed from web/api/exporters/shell/allmetrics_shell.c)  0
-rw-r--r--  src/web/api/exporters/shell/allmetrics_shell.h (renamed from web/api/exporters/shell/allmetrics_shell.h)  0
-rw-r--r--  src/web/api/formatters/charts2json.c (renamed from web/api/formatters/charts2json.c)  0
-rw-r--r--  src/web/api/formatters/charts2json.h (renamed from web/api/formatters/charts2json.h)  0
-rw-r--r--  src/web/api/formatters/csv/csv.c (renamed from web/api/formatters/csv/csv.c)  0
-rw-r--r--  src/web/api/formatters/csv/csv.h (renamed from web/api/formatters/csv/csv.h)  0
-rw-r--r--  src/web/api/formatters/json/json.c (renamed from web/api/formatters/json/json.c)  0
-rw-r--r--  src/web/api/formatters/json/json.h (renamed from web/api/formatters/json/json.h)  0
-rw-r--r--  src/web/api/formatters/json_wrapper.c (renamed from web/api/formatters/json_wrapper.c)  24
-rw-r--r--  src/web/api/formatters/json_wrapper.h (renamed from web/api/formatters/json_wrapper.h)  0
-rw-r--r--  src/web/api/formatters/rrd2json.c (renamed from web/api/formatters/rrd2json.c)  0
-rw-r--r--  src/web/api/formatters/rrd2json.h (renamed from web/api/formatters/rrd2json.h)  0
-rw-r--r--  src/web/api/formatters/rrdset2json.c (renamed from web/api/formatters/rrdset2json.c)  4
-rw-r--r--  src/web/api/formatters/rrdset2json.h (renamed from web/api/formatters/rrdset2json.h)  0
-rw-r--r--  src/web/api/formatters/ssv/ssv.c (renamed from web/api/formatters/ssv/ssv.c)  0
-rw-r--r--  src/web/api/formatters/ssv/ssv.h (renamed from web/api/formatters/ssv/ssv.h)  0
-rw-r--r--  src/web/api/formatters/value/value.c (renamed from web/api/formatters/value/value.c)  68
-rw-r--r--  src/web/api/formatters/value/value.h (renamed from web/api/formatters/value/value.h)  0
-rw-r--r--  src/web/api/ilove/ilove.c (renamed from web/api/ilove/ilove.c)  0
-rw-r--r--  src/web/api/ilove/ilove.h (renamed from web/api/ilove/ilove.h)  0
-rw-r--r--  src/web/api/ilove/measure-text.js (renamed from web/api/ilove/measure-text.js)  0
-rw-r--r--  src/web/api/netdata-swagger.json (renamed from web/api/netdata-swagger.json)  357
-rw-r--r--  src/web/api/netdata-swagger.yaml (renamed from web/api/netdata-swagger.yaml)  227
-rw-r--r--  src/web/api/queries/average/average.c (renamed from web/api/queries/average/average.c)  0
-rw-r--r--  src/web/api/queries/average/average.h (renamed from web/api/queries/average/average.h)  0
-rw-r--r--  src/web/api/queries/countif/countif.c (renamed from web/api/queries/countif/countif.c)  0
-rw-r--r--  src/web/api/queries/countif/countif.h (renamed from web/api/queries/countif/countif.h)  0
-rw-r--r--  src/web/api/queries/des/des.c (renamed from web/api/queries/des/des.c)  0
-rw-r--r--  src/web/api/queries/des/des.h (renamed from web/api/queries/des/des.h)  0
-rw-r--r--  src/web/api/queries/incremental_sum/incremental_sum.c (renamed from web/api/queries/incremental_sum/incremental_sum.c)  0
-rw-r--r--  src/web/api/queries/incremental_sum/incremental_sum.h (renamed from web/api/queries/incremental_sum/incremental_sum.h)  0
-rw-r--r--  src/web/api/queries/max/max.c (renamed from web/api/queries/max/max.c)  0
-rw-r--r--  src/web/api/queries/max/max.h (renamed from web/api/queries/max/max.h)  0
-rw-r--r--  src/web/api/queries/median/median.c (renamed from web/api/queries/median/median.c)  0
-rw-r--r--  src/web/api/queries/median/median.h (renamed from web/api/queries/median/median.h)  0
-rw-r--r--  src/web/api/queries/min/min.c (renamed from web/api/queries/min/min.c)  0
-rw-r--r--  src/web/api/queries/min/min.h (renamed from web/api/queries/min/min.h)  0
-rw-r--r--  src/web/api/queries/percentile/percentile.c (renamed from web/api/queries/percentile/percentile.c)  0
-rw-r--r--  src/web/api/queries/percentile/percentile.h (renamed from web/api/queries/percentile/percentile.h)  0
-rw-r--r--  src/web/api/queries/rrdr.c (renamed from web/api/queries/rrdr.c)  0
-rw-r--r--  src/web/api/queries/rrdr.h (renamed from web/api/queries/rrdr.h)  54
-rw-r--r--  src/web/api/queries/ses/ses.c (renamed from web/api/queries/ses/ses.c)  0
-rw-r--r--  src/web/api/queries/ses/ses.h (renamed from web/api/queries/ses/ses.h)  0
-rw-r--r--  src/web/api/queries/stddev/stddev.c (renamed from web/api/queries/stddev/stddev.c)  0
-rw-r--r--  src/web/api/queries/stddev/stddev.h (renamed from web/api/queries/stddev/stddev.h)  0
-rw-r--r--  src/web/api/queries/sum/sum.c (renamed from web/api/queries/sum/sum.c)  0
-rw-r--r--  src/web/api/queries/sum/sum.h (renamed from web/api/queries/sum/sum.h)  0
-rw-r--r--  src/web/api/queries/trimmed_mean/trimmed_mean.c (renamed from web/api/queries/trimmed_mean/trimmed_mean.c)  0
-rw-r--r--  src/web/api/queries/trimmed_mean/trimmed_mean.h (renamed from web/api/queries/trimmed_mean/trimmed_mean.h)  0
-rw-r--r--  src/web/api/queries/weights.c (renamed from web/api/queries/weights.c)  8
-rw-r--r--  src/web/api/queries/weights.h (renamed from web/api/queries/weights.h)  2
-rw-r--r--  src/web/api/tests/valid_urls.c (renamed from web/api/tests/valid_urls.c)  0
-rw-r--r--  src/web/api/tests/web_api.c (renamed from web/api/tests/web_api.c)  0
-rw-r--r--  src/web/api/web_api.c (renamed from web/api/web_api.c)  91
-rw-r--r--  src/web/api/web_api_v1.c (renamed from web/api/web_api_v1.c)  556
-rw-r--r--  src/web/api/web_api_v1.h (renamed from web/api/web_api_v1.h)  9
-rw-r--r--  src/web/api/web_api_v2.h (renamed from web/api/web_api_v2.h)  0
-rw-r--r--  web/api/Makefile.am  22
-rw-r--r--  web/api/README.md  12
-rw-r--r--  web/api/badges/Makefile.am  8
-rw-r--r--  web/api/badges/README.md  369
-rw-r--r--  web/api/exporters/Makefile.am  13
-rw-r--r--  web/api/exporters/README.md  14
-rw-r--r--  web/api/exporters/prometheus/Makefile.am  8
-rw-r--r--  web/api/exporters/prometheus/README.md  14
-rw-r--r--  web/api/exporters/shell/Makefile.am  8
-rw-r--r--  web/api/exporters/shell/README.md  73
-rw-r--r--  web/api/formatters/Makefile.am  15
-rw-r--r--  web/api/formatters/README.md  82
-rw-r--r--  web/api/formatters/csv/Makefile.am  8
-rw-r--r--  web/api/formatters/csv/README.md  148
-rw-r--r--  web/api/formatters/json/Makefile.am  8
-rw-r--r--  web/api/formatters/json/README.md  160
-rw-r--r--  web/api/formatters/ssv/Makefile.am  8
-rw-r--r--  web/api/formatters/ssv/README.md  63
-rw-r--r--  web/api/formatters/value/Makefile.am  8
-rw-r--r--  web/api/formatters/value/README.md  28
-rw-r--r--  web/api/health/Makefile.am  8
-rw-r--r--  web/api/health/README.md  222
-rw-r--r--  web/api/health/health_cmdapi.c  204
-rw-r--r--  web/api/health/health_cmdapi.h  31
-rw-r--r--  web/api/ilove/Makefile.am  9
-rw-r--r--  web/api/queries/Makefile.am  23
-rw-r--r--  web/api/queries/README.md  181
-rw-r--r--  web/api/queries/average/Makefile.am  8
-rw-r--r--  web/api/queries/average/README.md  50
-rw-r--r--  web/api/queries/countif/Makefile.am  8
-rw-r--r--  web/api/queries/countif/README.md  40
-rw-r--r--  web/api/queries/des/Makefile.am  8
-rw-r--r--  web/api/queries/des/README.md  77
-rw-r--r--  web/api/queries/incremental_sum/Makefile.am  8
-rw-r--r--  web/api/queries/incremental_sum/README.md  45
-rw-r--r--  web/api/queries/max/Makefile.am  8
-rw-r--r--  web/api/queries/max/README.md  42
-rw-r--r--  web/api/queries/median/Makefile.am  8
-rw-r--r--  web/api/queries/median/README.md  64
-rw-r--r--  web/api/queries/min/Makefile.am  8
-rw-r--r--  web/api/queries/min/README.md  42
-rw-r--r--  web/api/queries/percentile/Makefile.am  8
-rw-r--r--  web/api/queries/percentile/README.md  62
-rw-r--r--  web/api/queries/query.c  3713
-rw-r--r--  web/api/queries/query.h  98
-rw-r--r--  web/api/queries/ses/Makefile.am  8
-rw-r--r--  web/api/queries/ses/README.md  65
-rw-r--r--  web/api/queries/stddev/Makefile.am  8
-rw-r--r--  web/api/queries/stddev/README.md  97
-rw-r--r--  web/api/queries/sum/Makefile.am  8
-rw-r--r--  web/api/queries/sum/README.md  45
-rw-r--r--  web/api/queries/trimmed_mean/Makefile.am  8
-rw-r--r--  web/api/queries/trimmed_mean/README.md  60
-rw-r--r--  web/api/web_api.h  57
-rw-r--r--  web/api/web_api_v2.c  884
117 files changed, 1164 insertions, 7526 deletions
diff --git a/web/api/ilove/README.md b/src/libnetdata/functions_evloop/README.md
index e69de29bb..e69de29bb 100644
--- a/web/api/ilove/README.md
+++ b/src/libnetdata/functions_evloop/README.md
diff --git a/web/api/badges/web_buffer_svg.c b/src/web/api/badges/web_buffer_svg.c
index 9b81ba4fb..23dc96d10 100644
--- a/web/api/badges/web_buffer_svg.c
+++ b/src/web/api/badges/web_buffer_svg.c
@@ -926,7 +926,7 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE);
}
else if(!strcmp(name, "options")) {
- options |= web_client_api_request_v1_data_options(value);
+ options |= rrdr_options_parse(value);
}
else if(!strcmp(name, "label")) label = value;
else if(!strcmp(name, "units")) units = value;
@@ -996,7 +996,7 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
int refresh = 0;
if(refresh_str && *refresh_str) {
if(!strcmp(refresh_str, "auto")) {
- if(rc) refresh = rc->update_every;
+ if(rc) refresh = rc->config.update_every;
else if(options & RRDR_OPTION_NOT_ALIGNED)
refresh = st->update_every;
else {
@@ -1029,7 +1029,7 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
}
if(!units) {
if(alarm) {
- if(rc->units)
+ if(rc->config.units)
units = rrdcalc_units(rc);
else
units = "";
diff --git a/web/api/badges/web_buffer_svg.h b/src/web/api/badges/web_buffer_svg.h
index 71857811f..71857811f 100644
--- a/web/api/badges/web_buffer_svg.h
+++ b/src/web/api/badges/web_buffer_svg.h
diff --git a/web/api/exporters/allmetrics.c b/src/web/api/exporters/allmetrics.c
index cad52a7d5..cad52a7d5 100644
--- a/web/api/exporters/allmetrics.c
+++ b/src/web/api/exporters/allmetrics.c
diff --git a/web/api/exporters/allmetrics.h b/src/web/api/exporters/allmetrics.h
index 3afc42e28..3afc42e28 100644
--- a/web/api/exporters/allmetrics.h
+++ b/src/web/api/exporters/allmetrics.h
diff --git a/web/api/exporters/shell/allmetrics_shell.c b/src/web/api/exporters/shell/allmetrics_shell.c
index c8248c148..c8248c148 100644
--- a/web/api/exporters/shell/allmetrics_shell.c
+++ b/src/web/api/exporters/shell/allmetrics_shell.c
diff --git a/web/api/exporters/shell/allmetrics_shell.h b/src/web/api/exporters/shell/allmetrics_shell.h
index d6598e08d..d6598e08d 100644
--- a/web/api/exporters/shell/allmetrics_shell.h
+++ b/src/web/api/exporters/shell/allmetrics_shell.h
diff --git a/web/api/formatters/charts2json.c b/src/web/api/formatters/charts2json.c
index cab4debae..cab4debae 100644
--- a/web/api/formatters/charts2json.c
+++ b/src/web/api/formatters/charts2json.c
diff --git a/web/api/formatters/charts2json.h b/src/web/api/formatters/charts2json.h
index 7b07af5a0..7b07af5a0 100644
--- a/web/api/formatters/charts2json.h
+++ b/src/web/api/formatters/charts2json.h
diff --git a/web/api/formatters/csv/csv.c b/src/web/api/formatters/csv/csv.c
index d81ddb34e..d81ddb34e 100644
--- a/web/api/formatters/csv/csv.c
+++ b/src/web/api/formatters/csv/csv.c
diff --git a/web/api/formatters/csv/csv.h b/src/web/api/formatters/csv/csv.h
index 666d4c660..666d4c660 100644
--- a/web/api/formatters/csv/csv.h
+++ b/src/web/api/formatters/csv/csv.h
diff --git a/web/api/formatters/json/json.c b/src/web/api/formatters/json/json.c
index 7e3f400e9..7e3f400e9 100644
--- a/web/api/formatters/json/json.c
+++ b/src/web/api/formatters/json/json.c
diff --git a/web/api/formatters/json/json.h b/src/web/api/formatters/json/json.h
index d1ab4f901..d1ab4f901 100644
--- a/web/api/formatters/json/json.h
+++ b/src/web/api/formatters/json/json.h
diff --git a/web/api/formatters/json_wrapper.c b/src/web/api/formatters/json_wrapper.c
index 708a0f1f1..fca5a0b83 100644
--- a/web/api/formatters/json_wrapper.c
+++ b/src/web/api/formatters/json_wrapper.c
@@ -435,23 +435,23 @@ static void query_target_summary_dimensions_v12(BUFFER *wb, QUERY_TARGET *qt, co
qm = tqm;
}
- const char *key, *id, *name;
+ const char *k, *id, *name;
if(v2) {
- key = rrdmetric_acquired_name(rma);
- id = key;
- name = key;
+ k = rrdmetric_acquired_name(rma);
+ id = k;
+ name = k;
}
else {
snprintfz(buf, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
rrdmetric_acquired_id(rma),
rrdmetric_acquired_name(rma));
- key = buf;
+ k = buf;
id = rrdmetric_acquired_id(rma);
name = rrdmetric_acquired_name(rma);
}
- z = dictionary_set(dict, key, NULL, sizeof(*z));
+ z = dictionary_set(dict, k, NULL, sizeof(*z));
if(!z->id) {
z->id = id;
z->name = name;
@@ -618,7 +618,7 @@ static void query_target_summary_alerts_v2(BUFFER *wb, QUERY_TARGET *qt, const c
rw_spinlock_read_lock(&st->alerts.spinlock);
if (st->alerts.base) {
for (RRDCALC *rc = st->alerts.base; rc; rc = rc->next) {
- z = dictionary_set(dict, string2str(rc->name), NULL, sizeof(*z));
+ z = dictionary_set(dict, string2str(rc->config.name), NULL, sizeof(*z));
switch(rc->status) {
case RRDCALC_STATUS_CLEAR:
@@ -887,7 +887,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb) {
buffer_json_member_add_time_t(wb, "after", r->view.after);
buffer_json_member_add_time_t(wb, "before", r->view.before);
buffer_json_member_add_string(wb, "group", time_grouping_tostring(qt->request.time_group_method));
- web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+ rrdr_options_to_buffer_json_array(wb, "options", options);
if(!rrdr_dimension_names(wb, "dimension_names", r, options))
rows = 0;
@@ -939,10 +939,10 @@ static void rrdset_rrdcalc_entries_v2(BUFFER *wb, RRDINSTANCE_ACQUIRED *ria) {
if(rc->status < RRDCALC_STATUS_CLEAR)
continue;
- buffer_json_member_add_object(wb, string2str(rc->name));
+ buffer_json_member_add_object(wb, string2str(rc->config.name));
buffer_json_member_add_string(wb, "st", rrdcalc_status2string(rc->status));
buffer_json_member_add_double(wb, "vl", rc->value);
- buffer_json_member_add_string(wb, "un", string2str(rc->units));
+ buffer_json_member_add_string(wb, "un", string2str(rc->config.units));
buffer_json_object_close(wb);
}
buffer_json_object_close(wb);
@@ -1299,7 +1299,7 @@ void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb) {
buffer_json_member_add_object(wb, "request");
{
buffer_json_member_add_string(wb, "format", rrdr_format_to_string(qt->request.format));
- web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", qt->request.options);
+ rrdr_options_to_buffer_json_array(wb, "options", qt->request.options);
buffer_json_member_add_object(wb, "scope");
buffer_json_member_add_string(wb, "scope_nodes", qt->request.scope_nodes);
@@ -1538,7 +1538,7 @@ void rrdr_json_wrapper_end2(RRDR *r, BUFFER *wb) {
if(options & RRDR_OPTION_DEBUG) {
buffer_json_member_add_string(wb, "format", rrdr_format_to_string(format));
- web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+ rrdr_options_to_buffer_json_array(wb, "options", options);
buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(qt->request.time_group_method));
}
diff --git a/web/api/formatters/json_wrapper.h b/src/web/api/formatters/json_wrapper.h
index a702f3a5c..a702f3a5c 100644
--- a/web/api/formatters/json_wrapper.h
+++ b/src/web/api/formatters/json_wrapper.h
diff --git a/web/api/formatters/rrd2json.c b/src/web/api/formatters/rrd2json.c
index 81c9ad5c7..81c9ad5c7 100644
--- a/web/api/formatters/rrd2json.c
+++ b/src/web/api/formatters/rrd2json.c
diff --git a/web/api/formatters/rrd2json.h b/src/web/api/formatters/rrd2json.h
index f0c0c39ba..f0c0c39ba 100644
--- a/web/api/formatters/rrd2json.h
+++ b/src/web/api/formatters/rrd2json.h
diff --git a/web/api/formatters/rrdset2json.c b/src/web/api/formatters/rrdset2json.c
index 9ada35336..542178b25 100644
--- a/web/api/formatters/rrdset2json.c
+++ b/src/web/api/formatters/rrdset2json.c
@@ -2,7 +2,7 @@
#include "rrdset2json.h"
-static int process_label_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) {
+static int process_label_callback(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) {
BUFFER *wb = data;
buffer_json_member_add_string_or_empty(wb, name, value);
return 1;
@@ -88,7 +88,7 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
buffer_json_member_add_string_or_empty(wb, "id", rrdcalc_name(rc));
buffer_json_member_add_string_or_empty(wb, "status", rrdcalc_status2string(rc->status));
buffer_json_member_add_string_or_empty(wb, "units", rrdcalc_units(rc));
- buffer_json_member_add_int64(wb, "duration", (int64_t)rc->update_every);
+ buffer_json_member_add_int64(wb, "duration", (int64_t)rc->config.update_every);
buffer_json_object_close(wb);
}
}
diff --git a/web/api/formatters/rrdset2json.h b/src/web/api/formatters/rrdset2json.h
index 8b325c65d..8b325c65d 100644
--- a/web/api/formatters/rrdset2json.h
+++ b/src/web/api/formatters/rrdset2json.h
diff --git a/web/api/formatters/ssv/ssv.c b/src/web/api/formatters/ssv/ssv.c
index 2eb26b459..2eb26b459 100644
--- a/web/api/formatters/ssv/ssv.c
+++ b/src/web/api/formatters/ssv/ssv.c
diff --git a/web/api/formatters/ssv/ssv.h b/src/web/api/formatters/ssv/ssv.h
index f7d4a9548..f7d4a9548 100644
--- a/web/api/formatters/ssv/ssv.h
+++ b/src/web/api/formatters/ssv/ssv.h
diff --git a/web/api/formatters/value/value.c b/src/web/api/formatters/value/value.c
index 1d07f62f6..0ec1b1265 100644
--- a/web/api/formatters/value/value.c
+++ b/src/web/api/formatters/value/value.c
@@ -2,7 +2,6 @@
#include "value.h"
-
inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate) {
size_t c;
@@ -10,61 +9,64 @@ inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
- NETDATA_DOUBLE sum = 0, min = 0, max = 0, v;
- int all_null = 1, init = 1;
+ NETDATA_DOUBLE sum = 0, min = NAN, max = NAN, v = NAN;
+ size_t dims = 0;
NETDATA_DOUBLE total_anomaly_rate = 0;
// for each dimension
for (c = 0; c < r->d ; c++) {
- if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ if(unlikely(!rrdr_dimension_should_be_exposed(r->od[c], options)))
+ continue;
+
+ if(unlikely((co[c] & RRDR_VALUE_EMPTY)))
continue;
NETDATA_DOUBLE n = cn[c];
- if(unlikely(init)) {
- if(n > 0) {
- min = 0;
- max = n;
- }
- else {
- min = n;
- max = 0;
- }
- init = 0;
- }
+ if(unlikely(!dims))
+ min = max = n;
- if(likely(!(co[c] & RRDR_VALUE_EMPTY))) {
- all_null = 0;
- sum += n;
- }
+ sum += n;
- if(n < min) min = n;
- if(n > max) max = n;
+ if (n < min) min = n;
+ if (n > max) max = n;
total_anomaly_rate += ar[c];
- }
- if(anomaly_rate) {
- if(!r->d) *anomaly_rate = 0;
- else *anomaly_rate = total_anomaly_rate / (NETDATA_DOUBLE)r->d;
+ dims++;
}
- if(unlikely(all_null)) {
- if(likely(all_values_are_null))
+ if(!dims) {
+ if(anomaly_rate)
+ *anomaly_rate = 0;
+
+ if(all_values_are_null)
*all_values_are_null = 1;
- return 0;
- }
- else {
- if(likely(all_values_are_null))
- *all_values_are_null = 0;
+
+ return (options & RRDR_OPTION_NULL2ZERO) ? 0 : NAN;
}
- if(options & RRDR_OPTION_MIN2MAX)
+ if(anomaly_rate)
+ *anomaly_rate = total_anomaly_rate / (NETDATA_DOUBLE)dims;
+
+ if(all_values_are_null)
+ *all_values_are_null = 0;
+
+ if(options & RRDR_OPTION_DIMS_MIN2MAX)
v = max - min;
+ else if(options & RRDR_OPTION_DIMS_AVERAGE)
+ v = sum / (NETDATA_DOUBLE)dims;
+ else if(options & RRDR_OPTION_DIMS_MIN)
+ v = min;
+ else if(options & RRDR_OPTION_DIMS_MAX)
+ v = max;
else
v = sum;
+ if((options & RRDR_OPTION_NULL2ZERO) && (isnan(v) || isinf(v)))
+ v = 0;
+
return v;
}
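
The rewrite of rrdr2value() above changes the reduction semantics: empty slots (RRDR_VALUE_EMPTY) no longer feed min/max, the anomaly rate is averaged over the dimensions that actually contributed (dims) rather than all of them (r->d), an all-empty row now yields NAN instead of 0 unless NULL2ZERO is set, and the cross-dimension reduction is selectable through the new RRDR_OPTION_DIMS_* flags. A minimal standalone sketch of the same reduction logic; reduce_row(), the values/empty arrays and the OPT_* constants are illustrative only, not part of the Netdata sources:

/* Per-row reduction as now done by rrdr2value(): skip empty slots, count
 * contributing dimensions, pick the reduction from the DIMS_* options. */
#include <math.h>
#include <stddef.h>
#include <stdio.h>

typedef enum {
    OPT_DIMS_MIN2MAX = 1 << 0,  // max - min instead of sum
    OPT_DIMS_AVERAGE = 1 << 1,  // average instead of sum
    OPT_DIMS_MIN     = 1 << 2,
    OPT_DIMS_MAX     = 1 << 3,
    OPT_NULL2ZERO    = 1 << 4,  // map NaN/inf results to zero
} OPTS;

static double reduce_row(const double *values, const int *empty, size_t n, OPTS options) {
    double sum = 0, min = NAN, max = NAN;
    size_t dims = 0;

    for (size_t c = 0; c < n; c++) {
        if (empty[c]) continue;        // empty slots no longer touch min/max
        double v = values[c];
        if (!dims) min = max = v;      // first contributing dimension seeds min/max
        sum += v;
        if (v < min) min = v;
        if (v > max) max = v;
        dims++;
    }

    if (!dims)                         // all slots empty: NaN unless null2zero
        return (options & OPT_NULL2ZERO) ? 0 : NAN;

    double v;
    if (options & OPT_DIMS_MIN2MAX)      v = max - min;
    else if (options & OPT_DIMS_AVERAGE) v = sum / (double)dims;
    else if (options & OPT_DIMS_MIN)     v = min;
    else if (options & OPT_DIMS_MAX)     v = max;
    else                                 v = sum;   // default remains the sum

    if ((options & OPT_NULL2ZERO) && (isnan(v) || isinf(v)))
        v = 0;
    return v;
}

int main(void) {
    double values[] = { 1.0, 5.0, 3.0 };
    int empty[]     = { 0,   0,   1   };   // third dimension has no data
    printf("sum=%g min2max=%g avg=%g\n",
           reduce_row(values, empty, 3, 0),
           reduce_row(values, empty, 3, OPT_DIMS_MIN2MAX),
           reduce_row(values, empty, 3, OPT_DIMS_AVERAGE));
    return 0;
}

For the contributing values {1, 5} the sketch yields sum 6, min2max 4 and average 3, matching the behavior of the new branches above.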
diff --git a/web/api/formatters/value/value.h b/src/web/api/formatters/value/value.h
index 072ca14f8..072ca14f8 100644
--- a/web/api/formatters/value/value.h
+++ b/src/web/api/formatters/value/value.h
diff --git a/web/api/ilove/ilove.c b/src/web/api/ilove/ilove.c
index 67489ec42..67489ec42 100644
--- a/web/api/ilove/ilove.c
+++ b/src/web/api/ilove/ilove.c
diff --git a/web/api/ilove/ilove.h b/src/web/api/ilove/ilove.h
index 010c19c6b..010c19c6b 100644
--- a/web/api/ilove/ilove.h
+++ b/src/web/api/ilove/ilove.h
diff --git a/web/api/ilove/measure-text.js b/src/web/api/ilove/measure-text.js
index e2a2a6e94..e2a2a6e94 100644
--- a/web/api/ilove/measure-text.js
+++ b/src/web/api/ilove/measure-text.js
diff --git a/web/api/netdata-swagger.json b/src/web/api/netdata-swagger.json
index 6ed3e08b8..3017f5d5c 100644
--- a/web/api/netdata-swagger.json
+++ b/src/web/api/netdata-swagger.json
@@ -3,7 +3,7 @@
"info": {
"title": "Netdata API",
"description": "Real-time performance and health monitoring.",
- "version": "1.38",
+ "version": "v1-rolling",
"contact": {
"name": "Netdata Agent API",
"email": "info@netdata.cloud",
@@ -373,6 +373,201 @@
}
}
},
+ "/api/v1/config": {
+ "get": {
+ "operationId": "getConfig",
+ "tags": [
+ "dyncfg"
+ ],
+ "description": "Get dynamic configuration information.\n",
+ "parameters": [
+ {
+ "name": "action",
+ "in": "query",
+ "description": "The type of information required",
+ "schema": {
+ "type": "string",
+ "enum": [
+ "tree",
+ "schema",
+ "get",
+ "enable",
+ "disable",
+ "restart"
+ ],
+ "default": "tree"
+ }
+ },
+ {
+ "name": "id",
+ "in": "query",
+ "description": "The ID of the dynamic configuration entity",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "path",
+ "in": "query",
+ "description": "Top level path of the configuration entities, used with action 'tree'",
+ "schema": {
+ "type": "string",
+ "default": "/"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "The timeout in seconds",
+ "schema": {
+ "type": "number",
+ "default": 120
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The call was successful.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/config_default_response"
+ },
+ {
+ "$ref": "#/components/schemas/config_tree"
+ },
+ {
+ "$ref": "#/components/schemas/config_schema"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "Something is wrong with the request.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/config_default_response"
+ }
+ }
+ }
+ },
+ "404": {
+ "description": "The configurable entity requests is not found.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/config_default_response"
+ }
+ }
+ }
+ }
+ }
+ },
+ "post": {
+ "operationId": "postConfig",
+ "tags": [
+ "dyncfg"
+ ],
+ "description": "Post dynamic configuration to Netdata.\n",
+ "parameters": [
+ {
+ "name": "action",
+ "in": "query",
+ "description": "The type of action required.",
+ "schema": {
+ "type": "string",
+ "enum": [
+ "add",
+ "test",
+ "update"
+ ]
+ }
+ },
+ {
+ "name": "id",
+ "in": "query",
+ "description": "The ID of the dynamic configuration entity to configure.",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "name",
+ "in": "query",
+ "description": "Name of the dynamic configuration entity, used with action 'add'",
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "The timeout in seconds",
+ "schema": {
+ "type": "number",
+ "default": 120
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The call was successful. This also means the configuration is currently running.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/config_default_response"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "The call was successful. The configuration has been accepted, but its status is not yet known.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/config_default_response"
+ }
+ }
+ }
+ },
+ "299": {
+ "description": "The call was successful. The configuration has been accepted, but a restart is required to apply it.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/config_default_response"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "Something is wrong with the request.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/config_default_response"
+ }
+ }
+ }
+ },
+ "404": {
+ "description": "The configurable entity requests is not found.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/config_default_response"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
"/api/v2/data": {
"get": {
"operationId": "dataQuery2",
@@ -385,7 +580,7 @@
{
"name": "group_by",
"in": "query",
- "description": "A comma separated list of the groupings required.\nAll possible values can be combined together, except `selected`. If `selected` is given in the list, all others are ignored.\nThe order they are placed in the list is currently ignored.\n",
+ "description": "A comma separated list of the groupings required.\nAll possible values can be combined together, except `selected`. If `selected` is given in the list, all others are ignored.\nThe order they are placed in the list is currently ignored.\nThis parameter is also accepted as `group_by[0]` and `group_by[1]` when multiple grouping passes are required.\n",
"required": false,
"schema": {
"type": "array",
@@ -410,7 +605,7 @@
{
"name": "group_by_label",
"in": "query",
- "description": "A comma separated list of the label keys to group by their values. The order of the labels in the list is respected.\n",
+ "description": "A comma separated list of the label keys to group by their values. The order of the labels in the list is respected.\nThis parameter is also accepted as `group_by_label[0]` and `group_by_label[1]` when multiple grouping passes are required.\n",
"required": false,
"schema": {
"type": "string",
@@ -421,7 +616,7 @@
{
"name": "aggregation",
"in": "query",
- "description": "The aggregation function to apply when grouping metrics together.\nWhen option `raw` is given, `average` and `avg` behave like `sum` and the caller is expected to calculate the average.\n",
+ "description": "The aggregation function to apply when grouping metrics together.\nWhen option `raw` is given, `average` and `avg` behave like `sum` and the caller is expected to calculate the average.\nThis parameter is also accepted as `aggregation[0]` and `aggregation[1]` when multiple grouping passes are required.\n",
"required": false,
"schema": {
"type": "string",
@@ -430,7 +625,8 @@
"max",
"avg",
"average",
- "sum"
+ "sum",
+ "percentage"
],
"default": "average"
}
@@ -3689,8 +3885,11 @@
"type": "integer"
},
"count": {
- "description": "The number of metrics aggregated into this point. This exists only when the option `raw` is given to the query.\n",
+ "description": "The number of metrics aggregated into this point.\nThis exists only when the option `raw` is given to the query and the final aggregation point is NOT `percentage`.\n",
"type": "integer"
+ },
+ "hidden": {
+ "description": "The sum of the non-selected dimensions aggregated for this group item point.\nThis exists only when the option `raw` is given to the query and the final aggregation method is `percentage`.\n"
}
}
},
@@ -4415,7 +4614,151 @@
},
"weighted_dimension": {
"type": "number"
+ },
+ "config_schema": {
+ "type": "object",
+ "properties": {
+ "jsonSchema": {
+ "type": "object",
+ "description": "Standard JSON Schema object describing the schema of each configurable entity."
+ },
+ "uiSchema": {
+ "type": "object",
+ "description": "Schema for react-json-schema-form to drive the UI. Provides additional UI-specific configuration."
+ }
+ }
+ },
+ "config_tree": {
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "integer",
+ "description": "The version of dynamic configuration supported by the Netdata agent."
+ },
+ "tree": {
+ "type": "object",
+ "description": "A map of configuration entity paths, each containing one or more configurable entities.",
+ "additionalProperties": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/config_entity"
+ }
+ }
+ },
+ "attention": {
+ "$ref": "#/components/schemas/config_attention"
+ }
+ }
+ },
+ "config_entity": {
+ "type": "object",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Can be 'single' for entities appearing once, 'template' for entities supporting multiple instances, or 'job' for jobs belonging to a template."
+ },
+ "status": {
+ "type": "string",
+ "description": "The current status of the entity. Values include 'accepted', 'running', 'failed', 'disabled', 'incomplete', or 'orphan'."
+ },
+ "cmds": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "An array of the possible actions supported by this entity."
+ },
+ "source_type": {
+ "type": "string",
+ "description": "The source type of the configuration (e.g., 'internal', 'stock', 'user', 'discovered', 'dyncfg')."
+ },
+ "source": {
+ "type": "string",
+ "description": "Additional information about the source, formatted as comma-separated name-value pairs."
+ },
+ "sync": {
+ "type": "boolean",
+ "description": "Indicates if this is an internal module (true) or an external plugin (false)."
+ },
+ "user_disabled": {
+ "type": "boolean",
+ "description": "True if the entity is disabled by the user."
+ },
+ "restart_required": {
+ "type": "boolean",
+ "description": "True if the entity requires a restart after addition or update."
+ },
+ "plugin_rejected": {
+ "type": "boolean",
+ "description": "True if a previously saved configuration failed to apply after a restart."
+ },
+ "payload": {
+ "type": "object",
+ "description": "Object containing at least an 'available' boolean indicating if there's a saved configuration for this entity.",
+ "properties": {
+ "available": {
+ "type": "boolean"
+ }
+ }
+ },
+ "saves": {
+ "type": "integer",
+ "description": "The number of times this configuration has been saved to disk by the dynamic configuration manager."
+ },
+ "created_ut": {
+ "type": "integer",
+ "format": "int64",
+ "description": "The timestamp in microseconds when this dynamic configuration was first created."
+ },
+ "modified_ut": {
+ "type": "integer",
+ "format": "int64",
+ "description": "The timestamp in microseconds when this dynamic configuration was last modified."
+ },
+ "template": {
+ "type": "string",
+ "description": "Shows the template the job belongs to, applicable when type is 'job'."
+ }
+ }
+ },
+ "config_attention": {
+ "type": "object",
+ "properties": {
+ "degraded": {
+ "type": "boolean"
+ },
+ "restart_required": {
+ "type": "integer"
+ },
+ "plugin_rejected": {
+ "type": "integer"
+ },
+ "status_failed": {
+ "type": "integer"
+ },
+ "status_incomplete": {
+ "type": "integer"
+ }
+ }
+ },
+ "config_default_response": {
+ "type": "object",
+ "properties": {
+ "status": {
+ "type": "integer",
+ "description": "The HTTP status code of the response."
+ },
+ "message": {
+ "type": "string",
+ "description": "A descriptive message about the response or the action taken."
+ },
+ "data": {
+ "type": "object",
+ "description": "The data payload of the response, contents vary depending on the specific request and action.",
+ "additionalProperties": true
+ }
+ }
}
}
}
-}
+}
\ No newline at end of file
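
The /api/v1/config endpoint added above drives Netdata's dynamic configuration (dyncfg). A hedged usage sketch with libcurl, assuming an agent on the default local port 19999; the endpoint, its parameters and their defaults come from the swagger text above, everything else is illustrative:

/* Fetch the dynamic-configuration tree (action=tree, path=/). */
#include <curl/curl.h>
#include <stdio.h>

int main(void) {
    curl_global_init(CURL_GLOBAL_DEFAULT);
    CURL *curl = curl_easy_init();
    if (!curl) return 1;

    // action defaults to 'tree' and path to '/' per the schema above
    curl_easy_setopt(curl, CURLOPT_URL,
                     "http://localhost:19999/api/v1/config?action=tree&path=/");
    // libcurl's default write callback prints the JSON body to stdout
    CURLcode rc = curl_easy_perform(curl);
    if (rc != CURLE_OK)
        fprintf(stderr, "request failed: %s\n", curl_easy_strerror(rc));

    curl_easy_cleanup(curl);
    curl_global_cleanup();
    return rc == CURLE_OK ? 0 : 1;
}

With action=tree the response follows the config_tree schema; the other GET actions (schema, get, enable, disable, restart) target a specific entity via the id parameter.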
diff --git a/web/api/netdata-swagger.yaml b/src/web/api/netdata-swagger.yaml
index 7fa852f4a..ad007ba47 100644
--- a/web/api/netdata-swagger.yaml
+++ b/src/web/api/netdata-swagger.yaml
@@ -2,7 +2,7 @@ openapi: 3.0.0
info:
title: Netdata API
description: Real-time performance and health monitoring.
- version: "1.38"
+ version: "v1-rolling"
contact:
name: Netdata Agent API
email: info@netdata.cloud
@@ -226,6 +226,129 @@ paths:
description: No context id was supplied in the request.
"404":
description: No context with the given id is found.
+ /api/v1/config:
+ get:
+ operationId: getConfig
+ tags:
+ - dyncfg
+ description: |
+ Get dynamic configuration information.
+ parameters:
+ - name: action
+ in: query
+ description: The type of information required
+ schema:
+ type: string
+ enum:
+ - tree
+ - schema
+ - get
+ - enable
+ - disable
+ - restart
+ default: tree
+ - name: id
+ in: query
+ description: The ID of the dynamic configuration entity
+ schema:
+ type: string
+ - name: path
+ in: query
+ description: Top level path of the configuration entities, used with action 'tree'
+ schema:
+ type: string
+ default: '/'
+ - name: timeout
+ in: query
+ description: The timeout in seconds
+ schema:
+ type: number
+ default: 120
+ responses:
+ "200":
+ description: The call was successful.
+ content:
+ application/json:
+ schema:
+ oneOf:
+ - $ref: '#/components/schemas/config_default_response'
+ - $ref: '#/components/schemas/config_tree'
+ - $ref: "#/components/schemas/config_schema"
+ "400":
+ description: Something is wrong with the request.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/config_default_response'
+ "404":
+ description: The configurable entity requests is not found.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/config_default_response'
+ post:
+ operationId: postConfig
+ tags:
+ - dyncfg
+ description: |
+ Post dynamic configuration to Netdata.
+ parameters:
+ - name: action
+ in: query
+ description: The type of action required.
+ schema:
+ type: string
+ enum:
+ - add
+ - test
+ - update
+ - name: id
+ in: query
+ description: The ID of the dynamic configuration entity to configure.
+ schema:
+ type: string
+ - name: name
+ in: query
+ description: Name of the dynamic configuration entity, used with action 'add'
+ schema:
+ type: string
+ - name: timeout
+ in: query
+ description: The timeout in seconds
+ schema:
+ type: number
+ default: 120
+ responses:
+ "200":
+ description: The call was successful. This also means the configuration is currently running.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/config_default_response'
+ "202":
+ description: The call was successful. The configuration has been accepted, but its status is not yet known.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/config_default_response'
+ "299":
+ description: The call was successful. The configuration has been accepted, but a restart is required to apply it.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/config_default_response'
+ "400":
+ description: Something is wrong with the request.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/config_default_response'
+ "404":
+ description: The configurable entity requests is not found.
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/config_default_response'
/api/v2/data:
get:
operationId: dataQuery2
@@ -3259,3 +3382,105 @@ components:
$ref: '#/components/schemas/weighted_dimension'
weighted_dimension:
type: number
+ config_schema:
+ type: object
+ properties:
+ jsonSchema:
+ type: object
+ description: Standard JSON Schema object describing the schema of each configurable entity.
+ uiSchema:
+ type: object
+ description: Schema for react-json-schema-form to drive the UI. Provides additional UI-specific configuration.
+ config_tree:
+ type: object
+ properties:
+ version:
+ type: integer
+ description: The version of dynamic configuration supported by the Netdata agent.
+ tree:
+ type: object
+ description: A map of configuration entity paths, each containing one or more configurable entities.
+ additionalProperties:
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/config_entity'
+ attention:
+ $ref: '#/components/schemas/config_attention'
+ config_entity:
+ type: object
+ properties:
+ type:
+ type: string
+ description: Can be 'single' for entities appearing once, 'template' for entities supporting multiple instances, or 'job' for jobs belonging to a template.
+ status:
+ type: string
+ description: The current status of the entity. Values include 'accepted', 'running', 'failed', 'disabled', 'incomplete', or 'orphan'.
+ cmds:
+ type: array
+ items:
+ type: string
+ description: An array of the possible actions supported by this entity.
+ source_type:
+ type: string
+ description: The source type of the configuration (e.g., 'internal', 'stock', 'user', 'discovered', 'dyncfg').
+ source:
+ type: string
+ description: Additional information about the source, formatted as comma-separated name-value pairs.
+ sync:
+ type: boolean
+ description: Indicates if this is an internal module (true) or an external plugin (false).
+ user_disabled:
+ type: boolean
+ description: True if the entity is disabled by the user.
+ restart_required:
+ type: boolean
+ description: True if the entity requires a restart after addition or update.
+ plugin_rejected:
+ type: boolean
+ description: True if a previously saved configuration failed to apply after a restart.
+ payload:
+ type: object
+ description: Object containing at least an 'available' boolean indicating if there's a saved configuration for this entity.
+ properties:
+ available:
+ type: boolean
+ saves:
+ type: integer
+ description: The number of times this configuration has been saved to disk by the dynamic configuration manager.
+ created_ut:
+ type: integer
+ format: int64
+ description: The timestamp in microseconds when this dynamic configuration was first created.
+ modified_ut:
+ type: integer
+ format: int64
+ description: The timestamp in microseconds when this dynamic configuration was last modified.
+ template:
+ type: string
+ description: Shows the template the job belongs to, applicable when type is 'job'.
+ config_attention:
+ type: object
+ properties:
+ degraded:
+ type: boolean
+ restart_required:
+ type: integer
+ plugin_rejected:
+ type: integer
+ status_failed:
+ type: integer
+ status_incomplete:
+ type: integer
+ config_default_response:
+ type: object
+ properties:
+ status:
+ type: integer
+ description: The HTTP status code of the response.
+ message:
+ type: string
+ description: A descriptive message about the response or the action taken.
+ data:
+ type: object
+ description: The data payload of the response, contents vary depending on the specific request and action.
+ additionalProperties: true
diff --git a/web/api/queries/average/average.c b/src/web/api/queries/average/average.c
index f54dcb243..f54dcb243 100644
--- a/web/api/queries/average/average.c
+++ b/src/web/api/queries/average/average.c
diff --git a/web/api/queries/average/average.h b/src/web/api/queries/average/average.h
index 2d77cc571..2d77cc571 100644
--- a/web/api/queries/average/average.h
+++ b/src/web/api/queries/average/average.h
diff --git a/web/api/queries/countif/countif.c b/src/web/api/queries/countif/countif.c
index 8a3a1f50b..8a3a1f50b 100644
--- a/web/api/queries/countif/countif.c
+++ b/src/web/api/queries/countif/countif.c
diff --git a/web/api/queries/countif/countif.h b/src/web/api/queries/countif/countif.h
index 896b9d873..896b9d873 100644
--- a/web/api/queries/countif/countif.h
+++ b/src/web/api/queries/countif/countif.h
diff --git a/web/api/queries/des/des.c b/src/web/api/queries/des/des.c
index d0e234e23..d0e234e23 100644
--- a/web/api/queries/des/des.c
+++ b/src/web/api/queries/des/des.c
diff --git a/web/api/queries/des/des.h b/src/web/api/queries/des/des.h
index 3153d497c..3153d497c 100644
--- a/web/api/queries/des/des.h
+++ b/src/web/api/queries/des/des.h
diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/src/web/api/queries/incremental_sum/incremental_sum.c
index 88072f297..88072f297 100644
--- a/web/api/queries/incremental_sum/incremental_sum.c
+++ b/src/web/api/queries/incremental_sum/incremental_sum.c
diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/src/web/api/queries/incremental_sum/incremental_sum.h
index f110c5861..f110c5861 100644
--- a/web/api/queries/incremental_sum/incremental_sum.h
+++ b/src/web/api/queries/incremental_sum/incremental_sum.h
diff --git a/web/api/queries/max/max.c b/src/web/api/queries/max/max.c
index cc5999a29..cc5999a29 100644
--- a/web/api/queries/max/max.c
+++ b/src/web/api/queries/max/max.c
diff --git a/web/api/queries/max/max.h b/src/web/api/queries/max/max.h
index c26bb79ad..c26bb79ad 100644
--- a/web/api/queries/max/max.h
+++ b/src/web/api/queries/max/max.h
diff --git a/web/api/queries/median/median.c b/src/web/api/queries/median/median.c
index 9865b485c..9865b485c 100644
--- a/web/api/queries/median/median.c
+++ b/src/web/api/queries/median/median.c
diff --git a/web/api/queries/median/median.h b/src/web/api/queries/median/median.h
index 3d6d35925..3d6d35925 100644
--- a/web/api/queries/median/median.h
+++ b/src/web/api/queries/median/median.h
diff --git a/web/api/queries/min/min.c b/src/web/api/queries/min/min.c
index cefa7cf31..cefa7cf31 100644
--- a/web/api/queries/min/min.c
+++ b/src/web/api/queries/min/min.c
diff --git a/web/api/queries/min/min.h b/src/web/api/queries/min/min.h
index 3c53dfd1d..3c53dfd1d 100644
--- a/web/api/queries/min/min.h
+++ b/src/web/api/queries/min/min.h
diff --git a/web/api/queries/percentile/percentile.c b/src/web/api/queries/percentile/percentile.c
index da3b32696..da3b32696 100644
--- a/web/api/queries/percentile/percentile.c
+++ b/src/web/api/queries/percentile/percentile.c
diff --git a/web/api/queries/percentile/percentile.h b/src/web/api/queries/percentile/percentile.h
index 0532f9d3f..0532f9d3f 100644
--- a/web/api/queries/percentile/percentile.h
+++ b/src/web/api/queries/percentile/percentile.h
diff --git a/web/api/queries/rrdr.c b/src/web/api/queries/rrdr.c
index 2a0016891..2a0016891 100644
--- a/web/api/queries/rrdr.c
+++ b/src/web/api/queries/rrdr.c
diff --git a/web/api/queries/rrdr.h b/src/web/api/queries/rrdr.h
index e02e00675..d36d3f5b3 100644
--- a/web/api/queries/rrdr.h
+++ b/src/web/api/queries/rrdr.h
@@ -21,34 +21,36 @@ typedef enum rrdr_options {
RRDR_OPTION_NONZERO = (1 << 0), // don't output dimensions with just zero values
RRDR_OPTION_REVERSED = (1 << 1), // output the rows in reverse order (oldest to newest)
RRDR_OPTION_ABSOLUTE = (1 << 2), // values positive, for DATASOURCE_SSV before summing
- RRDR_OPTION_MIN2MAX = (1 << 3), // when adding dimensions, use max - min, instead of sum
- RRDR_OPTION_SECONDS = (1 << 4), // output seconds, instead of dates
- RRDR_OPTION_MILLISECONDS = (1 << 5), // output milliseconds, instead of dates
- RRDR_OPTION_NULL2ZERO = (1 << 6), // do not show nulls, convert them to zeros
- RRDR_OPTION_OBJECTSROWS = (1 << 7), // each row of values should be an object, not an array
- RRDR_OPTION_GOOGLE_JSON = (1 << 8), // comply with google JSON/JSONP specs
- RRDR_OPTION_JSON_WRAP = (1 << 9), // wrap the response in a JSON header with info about the result
- RRDR_OPTION_LABEL_QUOTES = (1 << 10), // in CSV output, wrap header labels in double quotes
- RRDR_OPTION_PERCENTAGE = (1 << 11), // give values as percentage of total
- RRDR_OPTION_NOT_ALIGNED = (1 << 12), // do not align charts for persistent timeframes
- RRDR_OPTION_DISPLAY_ABS = (1 << 13), // for badges, display the absolute value, but calculate colors with sign
- RRDR_OPTION_MATCH_IDS = (1 << 14), // when filtering dimensions, match only IDs
- RRDR_OPTION_MATCH_NAMES = (1 << 15), // when filtering dimensions, match only names
- RRDR_OPTION_NATURAL_POINTS = (1 << 16), // return the natural points of the database
- RRDR_OPTION_VIRTUAL_POINTS = (1 << 17), // return virtual points
- RRDR_OPTION_ANOMALY_BIT = (1 << 18), // Return the anomaly bit stored in each collected_number
- RRDR_OPTION_RETURN_RAW = (1 << 19), // Return raw data for aggregating across multiple nodes
- RRDR_OPTION_RETURN_JWAR = (1 << 20), // Return anomaly rates in jsonwrap
- RRDR_OPTION_SELECTED_TIER = (1 << 21), // Use the selected tier for the query
- RRDR_OPTION_ALL_DIMENSIONS = (1 << 22), // Return the full dimensions list
- RRDR_OPTION_SHOW_DETAILS = (1 << 23), // v2 returns detailed object tree
- RRDR_OPTION_DEBUG = (1 << 24), // v2 returns request description
- RRDR_OPTION_MINIFY = (1 << 25), // remove JSON spaces and newlines from JSON output
- RRDR_OPTION_GROUP_BY_LABELS = (1 << 26), // v2 returns flattened labels per dimension of the chart
+ RRDR_OPTION_DIMS_MIN2MAX = (1 << 3), // when adding dimensions, use max - min, instead of sum
+ RRDR_OPTION_DIMS_AVERAGE = (1 << 4), // when adding dimensions, use average, instead of sum
+ RRDR_OPTION_DIMS_MIN = (1 << 5), // when adding dimensions, use minimum, instead of sum
+ RRDR_OPTION_DIMS_MAX = (1 << 6), // when adding dimensions, use maximum, instead of sum
+ RRDR_OPTION_SECONDS = (1 << 7), // output seconds, instead of dates
+ RRDR_OPTION_MILLISECONDS = (1 << 8), // output milliseconds, instead of dates
+ RRDR_OPTION_NULL2ZERO = (1 << 9), // do not show nulls, convert them to zeros
+ RRDR_OPTION_OBJECTSROWS = (1 << 10), // each row of values should be an object, not an array
+ RRDR_OPTION_GOOGLE_JSON = (1 << 11), // comply with google JSON/JSONP specs
+ RRDR_OPTION_JSON_WRAP = (1 << 12), // wrap the response in a JSON header with info about the result
+ RRDR_OPTION_LABEL_QUOTES = (1 << 13), // in CSV output, wrap header labels in double quotes
+ RRDR_OPTION_PERCENTAGE = (1 << 14), // give values as percentage of total
+ RRDR_OPTION_NOT_ALIGNED = (1 << 15), // do not align charts for persistent timeframes
+ RRDR_OPTION_DISPLAY_ABS = (1 << 16), // for badges, display the absolute value, but calculate colors with sign
+ RRDR_OPTION_MATCH_IDS = (1 << 17), // when filtering dimensions, match only IDs
+ RRDR_OPTION_MATCH_NAMES = (1 << 18), // when filtering dimensions, match only names
+ RRDR_OPTION_NATURAL_POINTS = (1 << 19), // return the natural points of the database
+ RRDR_OPTION_VIRTUAL_POINTS = (1 << 20), // return virtual points
+ RRDR_OPTION_ANOMALY_BIT = (1 << 21), // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_RETURN_RAW = (1 << 22), // Return raw data for aggregating across multiple nodes
+ RRDR_OPTION_RETURN_JWAR = (1 << 23), // Return anomaly rates in jsonwrap
+ RRDR_OPTION_SELECTED_TIER = (1 << 24), // Use the selected tier for the query
+ RRDR_OPTION_ALL_DIMENSIONS = (1 << 25), // Return the full dimensions list
+ RRDR_OPTION_SHOW_DETAILS = (1 << 26), // v2 returns detailed object tree
+ RRDR_OPTION_DEBUG = (1 << 27), // v2 returns request description
+ RRDR_OPTION_MINIFY = (1 << 28), // remove JSON spaces and newlines from JSON output
+ RRDR_OPTION_GROUP_BY_LABELS = (1 << 29), // v2 returns flattened labels per dimension of the chart
// internal ones - not to be exposed to the API
- RRDR_OPTION_HEALTH_RSRVD1 = (1 << 30), // reserved for RRDCALC_OPTION_NO_CLEAR_NOTIFICATION
- RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate
+ RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate
} RRDR_OPTIONS;
typedef enum context_v2_options {
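
Note that this renumbering shifts most option bits (NULL2ZERO moves from bit 6 to bit 9, for example) and drops RRDR_OPTION_HEALTH_RSRVD1 entirely, so raw numeric option values are not stable across versions; the keyword names parsed by rrdr_options_parse() remain the stable interface. An illustrative sketch of that keyword-to-bitmask mapping, with a reduced table and an assumed separator set (",| "); the real parser lives in web_api_v1.c (shown further below):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OPT_NONZERO      (1u << 0)
#define OPT_DIMS_MIN2MAX (1u << 3)   // same bit as the old MIN2MAX, renamed
#define OPT_NULL2ZERO    (1u << 9)   // moved from bit 6: numeric values shifted

static const struct { const char *name; uint32_t value; } table[] = {
    { "nonzero",   OPT_NONZERO },
    { "min2max",   OPT_DIMS_MIN2MAX },
    { "null2zero", OPT_NULL2ZERO },
    { NULL, 0 },   // terminator
};

static uint32_t options_parse(char *s) {
    uint32_t ret = 0;
    // split the keyword list and OR together every recognized flag
    for (char *tok = strtok(s, ",| "); tok; tok = strtok(NULL, ",| "))
        for (int i = 0; table[i].name; i++)
            if (!strcmp(tok, table[i].name))
                ret |= table[i].value;
    return ret;
}

int main(void) {
    char opts[] = "min2max|null2zero";
    printf("0x%x\n", (unsigned)options_parse(opts));  // 0x208: bits 3 and 9
    return 0;
}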
diff --git a/web/api/queries/ses/ses.c b/src/web/api/queries/ses/ses.c
index 39eb445a0..39eb445a0 100644
--- a/web/api/queries/ses/ses.c
+++ b/src/web/api/queries/ses/ses.c
diff --git a/web/api/queries/ses/ses.h b/src/web/api/queries/ses/ses.h
index de8645ff0..de8645ff0 100644
--- a/web/api/queries/ses/ses.h
+++ b/src/web/api/queries/ses/ses.h
diff --git a/web/api/queries/stddev/stddev.c b/src/web/api/queries/stddev/stddev.c
index 8f5431194..8f5431194 100644
--- a/web/api/queries/stddev/stddev.c
+++ b/src/web/api/queries/stddev/stddev.c
diff --git a/web/api/queries/stddev/stddev.h b/src/web/api/queries/stddev/stddev.h
index f7a1a06c3..f7a1a06c3 100644
--- a/web/api/queries/stddev/stddev.h
+++ b/src/web/api/queries/stddev/stddev.h
diff --git a/web/api/queries/sum/sum.c b/src/web/api/queries/sum/sum.c
index cf4484217..cf4484217 100644
--- a/web/api/queries/sum/sum.c
+++ b/src/web/api/queries/sum/sum.c
diff --git a/web/api/queries/sum/sum.h b/src/web/api/queries/sum/sum.h
index 5e07f45d6..5e07f45d6 100644
--- a/web/api/queries/sum/sum.h
+++ b/src/web/api/queries/sum/sum.h
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.c b/src/web/api/queries/trimmed_mean/trimmed_mean.c
index c50db7ed6..c50db7ed6 100644
--- a/web/api/queries/trimmed_mean/trimmed_mean.c
+++ b/src/web/api/queries/trimmed_mean/trimmed_mean.c
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.h b/src/web/api/queries/trimmed_mean/trimmed_mean.h
index 3c09015bf..3c09015bf 100644
--- a/web/api/queries/trimmed_mean/trimmed_mean.h
+++ b/src/web/api/queries/trimmed_mean/trimmed_mean.h
diff --git a/web/api/queries/weights.c b/src/web/api/queries/weights.c
index 68af2250f..44928fea8 100644
--- a/web/api/queries/weights.c
+++ b/src/web/api/queries/weights.c
@@ -113,7 +113,7 @@ static void register_result(DICTIONARY *results, RRDHOST *host, RRDCONTEXT_ACQUI
// we can use the pointer address or RMA as a unique key for each metric
char buf[20 + 1];
ssize_t len = snprintfz(buf, sizeof(buf) - 1, "%p", rma);
- dictionary_set_advanced(results, buf, len + 1, &t, sizeof(struct register_result), NULL);
+ dictionary_set_advanced(results, buf, len, &t, sizeof(struct register_result), NULL);
}
// ----------------------------------------------------------------------------
@@ -158,7 +158,7 @@ static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *w
buffer_json_member_add_string(wb, "group", time_grouping_tostring(group));
buffer_json_member_add_string(wb, "method", weights_method_to_string(method));
- web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+ rrdr_options_to_buffer_json_array(wb, "options", options);
}
static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
@@ -372,7 +372,7 @@ static void results_header_to_json_v2(DICTIONARY *results __maybe_unused, BUFFER
buffer_json_member_add_object(wb, "request");
buffer_json_member_add_string(wb, "method", weights_method_to_string(method));
- web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+ rrdr_options_to_buffer_json_array(wb, "options", options);
buffer_json_member_add_object(wb, "scope");
buffer_json_member_add_string(wb, "scope_nodes", qwd->qwr->scope_nodes ? qwd->qwr->scope_nodes : "*");
@@ -1717,6 +1717,8 @@ static ssize_t weights_for_rrdmetric(void *data, RRDHOST *host, RRDCONTEXT_ACQUI
return -1;
}
+ query_progress_done_step(qwr->transaction, 1);
+
return 1;
}
diff --git a/web/api/queries/weights.h b/src/web/api/queries/weights.h
index 66bea6ab2..a93519b6f 100644
--- a/web/api/queries/weights.h
+++ b/src/web/api/queries/weights.h
@@ -57,6 +57,8 @@ typedef struct query_weights_request {
weights_interrupt_callback_t interrupt_callback;
void *interrupt_callback_data;
+
+ uuid_t *transaction;
} QUERY_WEIGHTS_REQUEST;
int web_api_v12_weights(BUFFER *wb, QUERY_WEIGHTS_REQUEST *qwr);
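
The new transaction field threads the web client's transaction UUID into the weights request so that query_progress_done_step() (called per metric in weights.c above) can account progress against it. A purely illustrative stand-in for such a registry, assuming libuuid; the real progress tracker is not part of this diff:

#include <stddef.h>
#include <uuid/uuid.h>

#define MAX_TRACKED 64

static struct { uuid_t id; size_t steps_done; int used; } progress[MAX_TRACKED];

// hypothetical stand-in for query_progress_done_step(transaction, steps)
void progress_done_step(const uuid_t *transaction, size_t steps) {
    if (!transaction) return;   // queries without a transaction are not tracked
    for (int i = 0; i < MAX_TRACKED; i++) {
        if (progress[i].used && !uuid_compare(progress[i].id, *transaction)) {
            progress[i].steps_done += steps;   // known transaction: accumulate
            return;
        }
    }
    for (int i = 0; i < MAX_TRACKED; i++) {    // first sighting: register it
        if (!progress[i].used) {
            uuid_copy(progress[i].id, *transaction);
            progress[i].steps_done = steps;
            progress[i].used = 1;
            return;
        }
    }
}

int main(void) {
    uuid_t tx;
    uuid_generate(tx);
    progress_done_step(&tx, 1);   // first step registers the transaction
    progress_done_step(&tx, 1);   // subsequent steps accumulate
    return 0;
}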
diff --git a/web/api/tests/valid_urls.c b/src/web/api/tests/valid_urls.c
index 764d02807..764d02807 100644
--- a/web/api/tests/valid_urls.c
+++ b/src/web/api/tests/valid_urls.c
diff --git a/web/api/tests/web_api.c b/src/web/api/tests/web_api.c
index 694929a94..694929a94 100644
--- a/web/api/tests/web_api.c
+++ b/src/web/api/tests/web_api.c
diff --git a/web/api/web_api.c b/src/web/api/web_api.c
index 25c765551..10a00e22e 100644
--- a/web/api/web_api.c
+++ b/src/web/api/web_api.c
@@ -2,51 +2,31 @@
#include "web_api.h"
-bool netdata_is_protected_by_bearer = false; // this is controlled by cloud, at the point the agent logs in - this should also be saved to /var/lib/netdata
-DICTIONARY *netdata_authorized_bearers = NULL;
-
-static short int web_client_check_acl_and_bearer(struct web_client *w, WEB_CLIENT_ACL endpoint_acl) {
- if(endpoint_acl == WEB_CLIENT_ACL_NONE || (endpoint_acl & WEB_CLIENT_ACL_NOCHECK))
- // the endpoint is totally public
- return HTTP_RESP_OK;
-
- bool acl_allows = w->acl & endpoint_acl;
- if(!acl_allows)
- // the channel we received the request from (w->acl) is not compatible with the endpoint
- return HTTP_RESP_FORBIDDEN;
-
- if(!netdata_is_protected_by_bearer && !(endpoint_acl & WEB_CLIENT_ACL_BEARER_REQUIRED))
- // bearer protection is not enabled and is not required by the endpoint
- return HTTP_RESP_OK;
-
- if(!(endpoint_acl & (WEB_CLIENT_ACL_BEARER_REQUIRED|WEB_CLIENT_ACL_BEARER_OPTIONAL)))
- // endpoint does not require a bearer
- return HTTP_RESP_OK;
-
- if((w->acl & (WEB_CLIENT_ACL_ACLK|WEB_CLIENT_ACL_WEBRTC)))
- // the request is coming from ACLK or WEBRTC (authorized already),
- return HTTP_RESP_OK;
-
- // at this point we need a bearer to serve the request
- // either because:
- //
- // 1. WEB_CLIENT_ACL_BEARER_REQUIRED, or
- // 2. netdata_is_protected_by_bearer == true
- //
-
- BEARER_STATUS t = api_check_bearer_token(w);
- if(t == BEARER_STATUS_AVAILABLE_AND_VALIDATED)
- // we have a valid bearer on the request
- return HTTP_RESP_OK;
-
- netdata_log_info("BEARER: bearer is required for request: code %d", t);
-
- return HTTP_RESP_PRECOND_FAIL;
-}
-
int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_path_endpoint, struct web_api_command *api_commands) {
buffer_no_cacheable(w->response.data);
+ internal_fatal(web_client_flags_check_auth(w) && !(w->access & HTTP_ACCESS_SIGNED_ID),
+ "signed-in permission should be set, but is missing");
+
+ internal_fatal(!web_client_flags_check_auth(w) && (w->access & HTTP_ACCESS_SIGNED_ID),
+ "signed-in permission is set, but it shouldn't");
+
+ if(!web_client_flags_check_auth(w)) {
+ w->user_role = (netdata_is_protected_by_bearer) ? HTTP_USER_ROLE_NONE : HTTP_USER_ROLE_ANY;
+ w->access = (netdata_is_protected_by_bearer) ? HTTP_ACCESS_NONE : HTTP_ACCESS_ANONYMOUS_DATA;
+ }
+
+#ifdef NETDATA_GOD_MODE
+ web_client_flag_set(w, WEB_CLIENT_FLAG_AUTH_GOD);
+ w->user_role = HTTP_USER_ROLE_ADMIN;
+ w->access = HTTP_ACCESS_ALL;
+#endif
+
+ if((w->access & HTTP_ACCESS_SIGNED_ID) && !(w->access & HTTP_ACCESS_SAME_SPACE)) {
+ // this should never happen: a signed-in user from a different space
+ return web_client_permission_denied(w);
+ }
+
if(unlikely(!url_path_endpoint || !*url_path_endpoint)) {
buffer_flush(w->response.data);
buffer_sprintf(w->response.data, "Which API command?");
@@ -64,8 +44,8 @@ int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_pat
uint32_t hash = simple_hash(api_command);
- for(int i = 0; api_commands[i].command ; i++) {
- if(unlikely(hash == api_commands[i].hash && !strcmp(api_command, api_commands[i].command))) {
+ for(int i = 0; api_commands[i].api ; i++) {
+ if(unlikely(hash == api_commands[i].hash && !strcmp(api_command, api_commands[i].api))) {
if(unlikely(!api_commands[i].allow_subpaths && api_command != url_path_endpoint)) {
buffer_flush(w->response.data);
buffer_sprintf(w->response.data, "API command '%s' does not support subpaths.", api_command);
@@ -76,19 +56,14 @@ int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_pat
if (api_command != url_path_endpoint)
freez(api_command);
- short int code = web_client_check_acl_and_bearer(w, api_commands[i].acl);
- if(code != HTTP_RESP_OK) {
- if(code == HTTP_RESP_FORBIDDEN)
- return web_client_permission_denied(w);
-
- if(code == HTTP_RESP_PRECOND_FAIL)
- return web_client_bearer_required(w);
+ bool acl_allows = ((w->acl & api_commands[i].acl) == api_commands[i].acl) || (api_commands[i].acl & HTTP_ACL_NOCHECK);
+ if(!acl_allows)
+ return web_client_permission_denied_acl(w);
- buffer_flush(w->response.data);
- buffer_sprintf(w->response.data, "Failed with code %d", code);
- w->response.code = code;
- return code;
- }
+ bool permissions_allows =
+ http_access_user_has_enough_access_level_for_endpoint(w->access, api_commands[i].access);
+ if(!permissions_allows)
+ return web_client_permission_denied(w);
char *query_string = (char *)buffer_tostring(w->url_query_string_decoded);
@@ -195,7 +170,7 @@ int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *ur
time_group_options = value;
else if(!strcmp(name, "options"))
- options |= web_client_api_request_v1_data_options(value);
+ options |= rrdr_options_parse(value);
else if(!strcmp(name, "method"))
method = weights_string_to_method(value);
@@ -278,6 +253,8 @@ int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *ur
.interrupt_callback = web_client_interrupt_callback,
.interrupt_callback_data = w,
+
+ .transaction = &w->transaction,
};
return web_api_v12_weights(wb, &qwr);
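
The refactor above replaces the combined ACL-and-bearer check with two independent gates: the transport ACL the request arrived on must cover every ACL bit the endpoint demands (unless the endpoint sets HTTP_ACL_NOCHECK), and the caller's access level must satisfy the endpoint's requirement via http_access_user_has_enough_access_level_for_endpoint(). A condensed sketch of that control flow; the types and the access-level comparison are simplified stand-ins for the real HTTP_ACL/HTTP_ACCESS machinery:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t ACL;
#define ACL_NOCHECK (1u << 0)

// gate 1: every ACL bit the endpoint demands must be present on the channel,
// unless the endpoint opted out with NOCHECK
static bool acl_allows(ACL channel, ACL endpoint) {
    return ((channel & endpoint) == endpoint) || (endpoint & ACL_NOCHECK);
}

// gate 2: stand-in for http_access_user_has_enough_access_level_for_endpoint(),
// modeled here as "caller holds every permission bit the endpoint requires"
static bool access_allows(uint32_t caller, uint32_t endpoint) {
    return (caller & endpoint) == endpoint;
}

int handle_request(ACL channel_acl, ACL endpoint_acl,
                   uint32_t caller_access, uint32_t endpoint_access) {
    if (!acl_allows(channel_acl, endpoint_acl))
        return 403;   // web_client_permission_denied_acl()
    if (!access_allows(caller_access, endpoint_access))
        return 403;   // web_client_permission_denied()
    return 200;       // dispatch to the endpoint callback
}

int main(void) {
    // a request on a matching channel with sufficient access is served
    return handle_request(0x2, 0x2, 0x1, 0x1) == 200 ? 0 : 1;
}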
diff --git a/web/api/web_api_v1.c b/src/web/api/web_api_v1.c
index e08f8aa2f..386221d61 100644
--- a/web/api/web_api_v1.c
+++ b/src/web/api/web_api_v1.c
@@ -8,17 +8,20 @@ static struct {
const char *name;
uint32_t hash;
RRDR_OPTIONS value;
-} api_v1_data_options[] = {
+} rrdr_options[] = {
{ "nonzero" , 0 , RRDR_OPTION_NONZERO}
, {"flip" , 0 , RRDR_OPTION_REVERSED}
, {"reversed" , 0 , RRDR_OPTION_REVERSED}
, {"reverse" , 0 , RRDR_OPTION_REVERSED}
, {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP}
- , {"min2max" , 0 , RRDR_OPTION_MIN2MAX}
+ , {"min2max" , 0 , RRDR_OPTION_DIMS_MIN2MAX} // rrdr2value() only
+ , {"average" , 0 , RRDR_OPTION_DIMS_AVERAGE} // rrdr2value() only
+ , {"min" , 0 , RRDR_OPTION_DIMS_MIN} // rrdr2value() only
+ , {"max" , 0 , RRDR_OPTION_DIMS_MAX} // rrdr2value() only
, {"ms" , 0 , RRDR_OPTION_MILLISECONDS}
, {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS}
- , {"abs" , 0 , RRDR_OPTION_ABSOLUTE}
, {"absolute" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"abs" , 0 , RRDR_OPTION_ABSOLUTE}
, {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE}
, {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE}
, {"display_absolute" , 0 , RRDR_OPTION_DISPLAY_ABS}
@@ -108,13 +111,15 @@ static struct {
uint32_t hash;
DATASOURCE_FORMAT value;
} api_v1_data_google_formats[] = {
- // this is not error - when google requests json, it expects javascript
- // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat
- { "json" , 0 , DATASOURCE_DATATABLE_JSONP}
- , {"html" , 0 , DATASOURCE_HTML}
- , {"csv" , 0 , DATASOURCE_CSV}
- , {"tsv-excel", 0 , DATASOURCE_TSV}
- , { NULL, 0, 0}
+ // this is not an error - when Google requests json, it expects javascript
+ // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat
+ {"json", 0, DATASOURCE_DATATABLE_JSONP}
+ , {"html", 0, DATASOURCE_HTML}
+ , {"csv", 0, DATASOURCE_CSV}
+ , {"tsv-excel", 0, DATASOURCE_TSV}
+
+ // terminator
+ , {NULL, 0, 0}
};
void web_client_api_v1_init(void) {
@@ -123,8 +128,8 @@ void web_client_api_v1_init(void) {
for(i = 0; contexts_v2_alert_status[i].name ; i++)
contexts_v2_alert_status[i].hash = simple_hash(contexts_v2_alert_status[i].name);
- for(i = 0; api_v1_data_options[i].name ; i++)
- api_v1_data_options[i].hash = simple_hash(api_v1_data_options[i].name);
+ for(i = 0; rrdr_options[i].name ; i++)
+ rrdr_options[i].hash = simple_hash(rrdr_options[i].name);
for(i = 0; contexts_v2_options[i].name ; i++)
contexts_v2_options[i].hash = simple_hash(contexts_v2_options[i].name);
@@ -157,7 +162,7 @@ char *get_mgmt_api_key(void) {
return guid;
// read it from disk
- int fd = open(api_key_filename, O_RDONLY);
+ int fd = open(api_key_filename, O_RDONLY | O_CLOEXEC);
if(fd != -1) {
char buf[GUID_LEN + 1];
if(read(fd, buf, GUID_LEN) != GUID_LEN)
@@ -183,7 +188,7 @@ char *get_mgmt_api_key(void) {
guid[GUID_LEN] = '\0';
// save it
- fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC, 444);
+ fd = open(api_key_filename, O_WRONLY|O_CREAT|O_TRUNC | O_CLOEXEC, 444);
if(fd == -1) {
netdata_log_error("Cannot create unique management API key file '%s'. Please adjust config parameter 'netdata management api key file' to a proper path and file.", api_key_filename);
goto temp_key;
@@ -209,21 +214,30 @@ void web_client_api_v1_management_init(void) {
api_secret = get_mgmt_api_key();
}
-inline RRDR_OPTIONS web_client_api_request_v1_data_options(char *o) {
+inline RRDR_OPTIONS rrdr_options_parse_one(const char *o) {
+ RRDR_OPTIONS ret = 0;
+
+ if(!o || !*o) return ret;
+
+ uint32_t hash = simple_hash(o);
+ int i;
+ for(i = 0; rrdr_options[i].name ; i++) {
+ if (unlikely(hash == rrdr_options[i].hash && !strcmp(o, rrdr_options[i].name))) {
+ ret |= rrdr_options[i].value;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+inline RRDR_OPTIONS rrdr_options_parse(char *o) {
RRDR_OPTIONS ret = 0;
char *tok;
while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) {
if(!*tok) continue;
-
- uint32_t hash = simple_hash(tok);
- int i;
- for(i = 0; api_v1_data_options[i].name ; i++) {
- if (unlikely(hash == api_v1_data_options[i].hash && !strcmp(tok, api_v1_data_options[i].name))) {
- ret |= api_v1_data_options[i].value;
- break;
- }
- }
+ ret |= rrdr_options_parse_one(tok);
}
return ret;
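For context, a minimal usage sketch of the renamed parser; the option names come from the `rrdr_options[]` table above, and the argument must be writable because `strsep_skip_consecutive_separators()` tokenizes it in place:

```c
// A minimal sketch; the flag names correspond to the rrdr_options[] table above.
char opts[] = "nonzero|jsonwrap,ms";            // as received in the URL's options= parameter
RRDR_OPTIONS flags = rrdr_options_parse(opts);  // splits on ", |" and ORs the matching flags

// flags == RRDR_OPTION_NONZERO | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_MILLISECONDS
```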
@@ -301,14 +315,14 @@ void web_client_api_request_v2_contexts_options_to_buffer_json_array(BUFFER *wb,
buffer_json_array_close(wb);
}
-void web_client_api_request_v1_data_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options) {
+void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options) {
buffer_json_member_add_array(wb, key);
RRDR_OPTIONS used = 0; // to prevent adding duplicates
- for(int i = 0; api_v1_data_options[i].name ; i++) {
- if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
- const char *name = api_v1_data_options[i].name;
- used |= api_v1_data_options[i].value;
+ for(int i = 0; rrdr_options[i].name ; i++) {
+ if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) {
+ const char *name = rrdr_options[i].name;
+ used |= rrdr_options[i].value;
buffer_json_add_array_item_string(wb, name);
}
@@ -317,16 +331,30 @@ void web_client_api_request_v1_data_options_to_buffer_json_array(BUFFER *wb, con
buffer_json_array_close(wb);
}
+void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) {
+ RRDR_OPTIONS used = 0; // to prevent adding duplicates
+ size_t added = 0;
+ for(int i = 0; rrdr_options[i].name ; i++) {
+ if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) {
+ const char *name = rrdr_options[i].name;
+ used |= rrdr_options[i].value;
+
+ if(added++) buffer_strcat(wb, " ");
+ buffer_strcat(wb, name);
+ }
+ }
+}
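For example, following the table order above, a mask of `RRDR_OPTION_NONZERO | RRDR_OPTION_JSON_WRAP` would be rendered into the buffer as the space-separated string:

```
nonzero jsonwrap
```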
+
void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) {
char *write = buf;
char *end = &buf[size - 1];
RRDR_OPTIONS used = 0; // to prevent adding duplicates
int added = 0;
- for(int i = 0; api_v1_data_options[i].name ; i++) {
- if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
- const char *name = api_v1_data_options[i].name;
- used |= api_v1_data_options[i].value;
+ for(int i = 0; rrdr_options[i].name ; i++) {
+ if (unlikely((rrdr_options[i].value & options) && !(rrdr_options[i].value & used))) {
+ const char *name = rrdr_options[i].name;
+ used |= rrdr_options[i].value;
if(added && write < end)
*write++ = ',';
@@ -512,6 +540,52 @@ inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client
return ret;
}
+static inline int web_client_api_request_variable(RRDHOST *host, struct web_client *w, char *url) {
+ int ret = HTTP_RESP_BAD_REQUEST;
+ char *chart = NULL;
+ char *variable = NULL;
+
+ buffer_flush(w->response.data);
+
+ while(url) {
+ char *value = strsep_skip_consecutive_separators(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = strsep_skip_consecutive_separators(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "chart")) chart = value;
+ else if(!strcmp(name, "variable")) variable = value;
+ }
+
+ if(!chart || !*chart || !variable || !*variable) {
+ buffer_sprintf(w->response.data, "A chart= and a variable= are required.");
+ goto cleanup;
+ }
+
+ RRDSET *st = rrdset_find(host, chart);
+ if(!st) st = rrdset_find_byname(host, chart);
+ if(!st) {
+ buffer_strcat(w->response.data, "Chart is not found: ");
+ buffer_strcat_htmlescape(w->response.data, chart);
+ ret = HTTP_RESP_NOT_FOUND;
+ goto cleanup;
+ }
+
+ w->response.data->content_type = CT_APPLICATION_JSON;
+ st->last_accessed_time_s = now_realtime_sec();
+ alert_variable_lookup_trace(host, st, variable, w->response.data);
+
+ return HTTP_RESP_OK;
+
+cleanup:
+ return ret;
+}
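As a hedged illustration of the new endpoint (the chart and variable names here are hypothetical), a request would look like:

```
/api/v1/variable?chart=system.cpu&variable=active_processors
```

On success it returns a JSON trace of the variable lookup for that chart; an unknown chart yields HTTP 404.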
+
inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url) {
return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json);
}
@@ -721,7 +795,7 @@ static inline int web_client_api_request_v1_data(RRDHOST *host, struct web_clien
format = web_client_api_request_v1_data_format(value);
}
else if(!strcmp(name, "options")) {
- options |= web_client_api_request_v1_data_options(value);
+ options |= rrdr_options_parse(value);
}
else if(!strcmp(name, "callback")) {
responseHandler = value;
@@ -822,6 +896,7 @@ static inline int web_client_api_request_v1_data(RRDHOST *host, struct web_clien
.priority = STORAGE_PRIORITY_NORMAL,
.interrupt_callback = web_client_interrupt_callback,
.interrupt_callback_data = w,
+ .transaction = &w->transaction,
};
qt = query_target_create(&qtr);
@@ -856,7 +931,7 @@ static inline int web_client_api_request_v1_data(RRDHOST *host, struct web_clien
responseHandler,
google_version,
google_reqId,
- (int64_t)st->last_updated.tv_sec);
+ (int64_t)(st ? st->last_updated.tv_sec : 0));
}
else if(format == DATASOURCE_JSONP) {
if(responseHandler == NULL)
@@ -941,7 +1016,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *
char *cookie = strstr(w->response.data->buffer, NETDATA_REGISTRY_COOKIE_NAME "=");
if(cookie)
strncpyz(person_guid, &cookie[sizeof(NETDATA_REGISTRY_COOKIE_NAME)], UUID_STR_LEN - 1);
- else if(extract_bearer_token_from_request(w, person_guid, sizeof(person_guid)) != BEARER_STATUS_EXTRACTED_FROM_HEADER)
+ else if(!extract_bearer_token_from_request(w, person_guid, sizeof(person_guid)))
person_guid[0] = '\0';
char action = '\0';
@@ -1018,13 +1093,13 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *
if(unlikely(action == 'H')) {
// HELLO request, dashboard ACL
analytics_log_dashboard();
- if(unlikely(!web_client_can_access_dashboard(w)))
- return web_client_permission_denied(w);
+ if(unlikely(!http_can_access_dashboard(w)))
+ return web_client_permission_denied_acl(w);
}
else {
// everything else, registry ACL
- if(unlikely(!web_client_can_access_registry(w)))
- return web_client_permission_denied(w);
+ if(unlikely(!http_can_access_registry(w)))
+ return web_client_permission_denied_acl(w);
if(unlikely(do_not_track)) {
buffer_flush(w->response.data);
@@ -1325,21 +1400,6 @@ int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char
return HTTP_RESP_OK;
}
-
-int web_client_api_request_v1_ml_models(RRDHOST *host, struct web_client *w, char *url) {
- (void) url;
-
- if (!netdata_ready)
- return HTTP_RESP_SERVICE_UNAVAILABLE;
-
- BUFFER *wb = w->response.data;
- buffer_flush(wb);
- wb->content_type = CT_APPLICATION_JSON;
- ml_host_get_models(host, wb);
- buffer_no_cacheable(wb);
-
- return HTTP_RESP_OK;
-}
#endif // ENABLE_ML
inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) {
@@ -1373,13 +1433,17 @@ static int web_client_api_request_v1_aclk_state(RRDHOST *host, struct web_client
}
int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) {
- return web_client_api_request_weights(host, w, url, default_metric_correlations_method,
- WEIGHTS_FORMAT_CHARTS, 1);
+ return web_client_api_request_weights(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS, 1);
}
int web_client_api_request_v1_weights(RRDHOST *host, struct web_client *w, char *url) {
- return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE,
- WEIGHTS_FORMAT_CONTEXTS, 1);
+ return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS, 1);
+}
+
+static void web_client_progress_functions_update(void *data, size_t done, size_t all) {
+ // handle progress updates from the plugin
+ struct web_client *w = data;
+ query_progress_functions_update(&w->transaction, done, all);
}
int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char *url) {
@@ -1413,9 +1477,14 @@ int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char
char transaction[UUID_COMPACT_STR_LEN];
uuid_unparse_lower_compact(w->transaction, transaction);
- return rrd_function_run(host, wb, timeout, function, true, transaction,
+ CLEAN_BUFFER *source = buffer_create(100, NULL);
+ web_client_source2buffer(w, source);
+
+ return rrd_function_run(host, wb, timeout, w->access, function, true, transaction,
NULL, NULL,
- web_client_interrupt_callback, w, NULL);
+ web_client_progress_functions_update, w,
+ web_client_interrupt_callback, w, NULL,
+ buffer_tostring(source));
}
int web_client_api_request_v1_functions(RRDHOST *host, struct web_client *w, char *url __maybe_unused) {
@@ -1434,6 +1503,113 @@ int web_client_api_request_v1_functions(RRDHOST *host, struct web_client *w, cha
return HTTP_RESP_OK;
}
+void web_client_source2buffer(struct web_client *w, BUFFER *source) {
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_CLOUD))
+ buffer_sprintf(source, "method=NC");
+ else if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_BEARER))
+ buffer_sprintf(source, "method=api-bearer");
+ else
+ buffer_sprintf(source, "method=api");
+
+ if(web_client_flag_check(w, WEB_CLIENT_FLAG_AUTH_GOD))
+ buffer_strcat(source, ",role=god");
+ else
+ buffer_sprintf(source, ",role=%s", http_id2user_role(w->user_role));
+
+ buffer_sprintf(source, ",permissions="HTTP_ACCESS_FORMAT, (HTTP_ACCESS_FORMAT_CAST)w->access);
+
+ if(w->auth.client_name[0])
+ buffer_sprintf(source, ",user=%s", w->auth.client_name);
+
+ if(!uuid_is_null(w->auth.cloud_account_id)) {
+ char uuid_str[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(w->auth.cloud_account_id, uuid_str);
+ buffer_sprintf(source, ",account=%s", uuid_str);
+ }
+
+ if(w->client_ip[0])
+ buffer_sprintf(source, ",ip=%s", w->client_ip);
+
+ if(w->forwarded_for)
+ buffer_sprintf(source, ",forwarded_for=%s", w->forwarded_for);
+}
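As a hedged illustration (all field values below are hypothetical, and the permissions field is rendered through `HTTP_ACCESS_FORMAT`), the buffer built above ends up looking something like:

```
method=api-bearer,role=admin,permissions=0x1f,user=jane,ip=203.0.113.7
```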
+
+static int web_client_api_request_v1_config(RRDHOST *host, struct web_client *w, char *url __maybe_unused) {
+ char *action = "tree";
+ char *path = "/";
+ char *id = NULL;
+ char *add_name = NULL;
+ int timeout = 120;
+
+ while(url) {
+ char *value = strsep_skip_consecutive_separators(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = strsep_skip_consecutive_separators(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "action"))
+ action = value;
+ else if(!strcmp(name, "path"))
+ path = value;
+ else if(!strcmp(name, "id"))
+ id = value;
+ else if(!strcmp(name, "name"))
+ add_name = value;
+ else if(!strcmp(name, "timeout")) {
+ timeout = (int)strtol(value, NULL, 10);
+ if(timeout < 10)
+ timeout = 10;
+ }
+ }
+
+ char transaction[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(w->transaction, transaction);
+
+ size_t len = strlen(action) + (id ? strlen(id) : 0) + strlen(path) + (add_name ? strlen(add_name) : 0) + 100;
+
+ char cmd[len];
+ if(strcmp(action, "tree") == 0)
+ snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " tree '%s' '%s'", path, id?id:"");
+ else {
+ DYNCFG_CMDS c = dyncfg_cmds2id(action);
+ if(!id || !*id || !dyncfg_is_valid_id(id)) {
+ rrd_call_function_error(w->response.data, "invalid id given", HTTP_RESP_BAD_REQUEST);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ if(c == DYNCFG_CMD_NONE) {
+ rrd_call_function_error(w->response.data, "invalid action given", HTTP_RESP_BAD_REQUEST);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ else if(c == DYNCFG_CMD_ADD) {
+ if(!add_name || !*add_name || !dyncfg_is_valid_id(add_name)) {
+ rrd_call_function_error(w->response.data, "invalid name given", HTTP_RESP_BAD_REQUEST);
+ return HTTP_RESP_BAD_REQUEST;
+ }
+ snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s %s", id, dyncfg_id2cmd_one(c), add_name);
+ }
+ else
+ snprintfz(cmd, sizeof(cmd), PLUGINSD_FUNCTION_CONFIG " %s %s", id, dyncfg_id2cmd_one(c));
+ }
+
+ CLEAN_BUFFER *source = buffer_create(100, NULL);
+ web_client_source2buffer(w, source);
+
+ buffer_flush(w->response.data);
+ int code = rrd_function_run(host, w->response.data, timeout, w->access, cmd,
+ true, transaction,
+ NULL, NULL,
+ web_client_progress_functions_update, w,
+ web_client_interrupt_callback, w,
+ w->payload, buffer_tostring(source));
+
+ return code;
+}
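For orientation, hedged examples of how this endpoint might be invoked (the ids and names are hypothetical; `tree` is the default action, and `add` additionally requires a valid `name`):

```
/api/v1/config?action=tree&path=/collectors
/api/v1/config?action=add&id=jobs.example&name=new-job&timeout=60
```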
+
#ifndef ENABLE_DBENGINE
int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) {
return HTTP_RESP_NOT_FOUND;
@@ -1540,50 +1716,238 @@ int web_client_api_request_v1_mgmt(RRDHOST *host, struct web_client *w, char *ur
}
needle += strlen(HLT_MGM);
if (*needle != '\0') {
- buffer_strcat(w->response.data, "Invalid management request. Curently only 'health' is supported.");
+ buffer_strcat(w->response.data, "Invalid management request. Currently only 'health' is supported.");
return HTTP_RESP_NOT_FOUND;
}
return web_client_api_request_v1_mgmt_health(host, w, url);
}
static struct web_api_command api_commands_v1[] = {
- { "info", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_info, 0 },
- { "data", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_data, 0 },
- { "chart", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_chart, 0 },
- { "charts", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_charts, 0 },
- { "context", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_context, 0 },
- { "contexts", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_contexts, 0 },
-
+ // time-series data APIs
+ {
+ .api = "data",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_data,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "weights",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_weights,
+ .allow_subpaths = 0
+ },
+ {
+ // deprecated - do not use anymore - use "weights"
+ .api = "metric_correlations",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_metric_correlations,
+ .allow_subpaths = 0
+ },
+ {
+ // exporting API
+ .api = "allmetrics",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_allmetrics,
+ .allow_subpaths = 0
+ },
+ {
+ // badges can be fetched with both dashboard and badge ACL
+ .api = "badge.svg",
+ .hash = 0,
+ .acl = HTTP_ACL_BADGES,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_badge,
+ .allow_subpaths = 0
+ },
+
+ // alerts APIs
+ {
+ .api = "alarms",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_alarms,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "alarms_values",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_alarms_values,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "alarm_log",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_alarm_log,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "alarm_variables",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_alarm_variables,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "variable",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_variable,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "alarm_count",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_alarm_count,
+ .allow_subpaths = 0
+ },
+
+ // functions APIs - they check permissions per function call
+ {
+ .api = "function",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_function,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "functions",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_functions,
+ .allow_subpaths = 0
+ },
+
+ // time-series metadata APIs
+ {
+ .api = "chart",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_chart,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "charts",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_charts,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "context",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_context,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "contexts",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_contexts,
+ .allow_subpaths = 0
+ },
+
+ // registry APIs
+ {
// registry checks the ACL by itself, so we allow everything
- { "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry, 0 },
-
- // badges can be fetched with both dashboard and badge permissions
- { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC | WEB_CLIENT_ACL_BADGE, web_client_api_request_v1_badge, 0 },
-
- { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarms, 0 },
- { "alarms_values", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarms_values, 0 },
- { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarm_log, 0 },
- { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarm_variables, 0 },
- { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarm_count, 0 },
- { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_allmetrics, 0 },
+ .api = "registry",
+ .hash = 0,
+ .acl = HTTP_ACL_NONE, // it manages acl by itself
+ .access = HTTP_ACCESS_NONE, // it manages access by itself
+ .callback = web_client_api_request_v1_registry,
+ .allow_subpaths = 0
+ },
+
+ // agent information APIs
+ {
+ .api = "info",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_info,
+ .allow_subpaths = 0
+ },
+ {
+ .api = "aclk",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_aclk_state,
+ .allow_subpaths = 0
+ },
+ {
+ // deprecated - use /api/v2/info
+ .api = "dbengine_stats",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_dbengine_stats,
+ .allow_subpaths = 0
+ },
+
+ // dyncfg APIs
+ {
+ .api = "config",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_config,
+ .allow_subpaths = 0
+ },
#if defined(ENABLE_ML)
- { "ml_info", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_ml_info, 0 },
- // { "ml_models", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_ml_models },
+ {
+ .api = "ml_info",
+ .hash = 0,
+ .acl = HTTP_ACL_DASHBOARD,
+ .access = HTTP_ACCESS_ANONYMOUS_DATA,
+ .callback = web_client_api_request_v1_ml_info,
+ .allow_subpaths = 0
+ },
#endif
- {"manage", 0, WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_mgmt, 1 },
- { "aclk", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_aclk_state, 0 },
- { "metric_correlations", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_metric_correlations, 0 },
- { "weights", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_weights, 0 },
-
- {"function", 0, WEB_CLIENT_ACL_ACLK_WEBRTC_DASHBOARD_WITH_BEARER | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_function, 0 },
- {"functions", 0, WEB_CLIENT_ACL_ACLK_WEBRTC_DASHBOARD_WITH_BEARER | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_functions, 0 },
-
- { "dbengine_stats", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_dbengine_stats, 0 },
-
- // terminator
- { NULL, 0, WEB_CLIENT_ACL_NONE, NULL, 0 },
+ {
+ // deprecated
+ .api = "manage",
+ .hash = 0,
+ .acl = HTTP_ACL_MANAGEMENT,
+ .access = HTTP_ACCESS_NONE, // it manages access by itself
+ .callback = web_client_api_request_v1_mgmt,
+ .allow_subpaths = 1
+ },
+
+ {
+ // terminator - keep this last on this list
+ .api = NULL,
+ .hash = 0,
+ .acl = HTTP_ACL_NONE,
+ .access = HTTP_ACCESS_NONE,
+ .callback = NULL,
+ .allow_subpaths = 0
+ },
};
inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url_path_endpoint) {
@@ -1592,8 +1956,8 @@ inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *
if(unlikely(initialized == 0)) {
initialized = 1;
- for(int i = 0; api_commands_v1[i].command ; i++)
- api_commands_v1[i].hash = simple_hash(api_commands_v1[i].command);
+ for(int i = 0; api_commands_v1[i].api ; i++)
+ api_commands_v1[i].hash = simple_hash(api_commands_v1[i].api);
}
return web_client_api_request_vX(host, w, url_path_endpoint, api_commands_v1);
diff --git a/web/api/web_api_v1.h b/src/web/api/web_api_v1.h
index 5845f3ec2..cf0efbd13 100644
--- a/web/api/web_api_v1.h
+++ b/src/web/api/web_api_v1.h
@@ -12,8 +12,11 @@ CONTEXTS_V2_ALERT_STATUS web_client_api_request_v2_alert_status(char *o);
void web_client_api_request_v2_contexts_options_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_OPTIONS options);
void web_client_api_request_v2_contexts_alerts_status_to_buffer_json_array(BUFFER *wb, const char *key, CONTEXTS_V2_ALERT_STATUS options);
-RRDR_OPTIONS web_client_api_request_v1_data_options(char *o);
-void web_client_api_request_v1_data_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options);
+RRDR_OPTIONS rrdr_options_parse(char *o);
+RRDR_OPTIONS rrdr_options_parse_one(const char *o);
+
+void rrdr_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options);
+void rrdr_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options);
void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options);
uint32_t web_client_api_request_v1_data_format(char *name);
@@ -38,6 +41,8 @@ void web_client_api_v1_management_init(void);
void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key);
void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key);
+void web_client_source2buffer(struct web_client *w, BUFFER *source);
+
extern char *api_secret;
#endif //NETDATA_WEB_API_V1_H
diff --git a/web/api/web_api_v2.h b/src/web/api/web_api_v2.h
index 4a1893bd8..4a1893bd8 100644
--- a/web/api/web_api_v2.h
+++ b/src/web/api/web_api_v2.h
diff --git a/web/api/Makefile.am b/web/api/Makefile.am
deleted file mode 100644
index f925de403..000000000
--- a/web/api/Makefile.am
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- badges \
- ilove \
- queries \
- exporters \
- formatters \
- health \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-dist_web_DATA = \
- netdata-swagger.yaml \
- netdata-swagger.json \
- $(NULL)
diff --git a/web/api/README.md b/web/api/README.md
deleted file mode 100644
index 237394a88..000000000
--- a/web/api/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# API
-
-## Netdata agent REST API
-
-The complete documentation of the Netdata agent's REST API is available in the OpenAPI format [in our GitHub repository](https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml).
-
-You can explore it using the **[Swagger UI](https://learn.netdata.cloud/api)**, or the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
-
-## Netdata cloud API
-
-A very basic Netdata cloud REST API supports the [Grafana data source plugin](https://github.com/netdata/netdata-grafana-datasource-plugin/blob/master/README.md),
-but has not yet been expanded for wider use. We intend to provide a properly documented API in the future.
diff --git a/web/api/badges/Makefile.am b/web/api/badges/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/badges/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/badges/README.md b/web/api/badges/README.md
deleted file mode 100644
index f6c031181..000000000
--- a/web/api/badges/README.md
+++ /dev/null
@@ -1,369 +0,0 @@
-<!--
-title: "Netdata badges"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/badges/README.md
-sidebar_label: "Netdata badges"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api"
--->
-
-# Netdata badges
-
-**Badges are cool!**
-
-Netdata can generate badges for any chart and any dimension at any time-frame. Badges come in `SVG` and can be added to any web page using an `<IMG>` HTML tag.
-
-**Netdata badges are powerful**!
-
-Given that Netdata collects from **1.000** to **5.000** metrics per server (depending on the number of network interfaces, disks, cpu cores, applications running, users logged in, containers running, etc) and that Netdata already has data reduction/aggregation functions embedded, the badges can be quite powerful.
-
-For each metric/dimension and for arbitrary time-frames, badges can show the **min**, **max** or **average** value, but also the **sum** or **incremental-sum** to get their **volume**.
-
-For example, there is [a chart in Netdata that shows the current requests/s of nginx](http://london.my-netdata.io/#nginx_local_nginx). Using this chart alone we can show the following badges (we could add more time-frames, like **today**, **yesterday**, etc):
-
-<a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&value_color=grey:null%7Cblue&label=nginx%20active%20connections%20now&units=null&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&after=-3600&value_color=orange&label=last%20hour%20average&units=null&options=unaligned&precision=0"/></a> <a href="https://registry.my-netdata.io/#nginx_local_nginx"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=nginx_local.connections&dimensions=active&group=max&after=-3600&value_color=red&label=last%20hour%20max&units=null&options=unaligned&precision=0"/></a>
-
-Similarly, there is [a chart that shows outbound bandwidth per class](http://london.my-netdata.io/#tc_eth0), using QoS data. So it shows `kilobits/s` per class. Using this chart we can show:
-
-<a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&value_color=green&label=web%20server%20sends%20now&units=kbps"/></a> <a href="https://registry.my-netdata.io/#tc_eth0"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=tc.world_out&dimensions=web_server&after=-86400&options=unaligned&group=sum&divide=8388608&value_color=blue&label=web%20server%20sent%20today&units=GB"/></a>
-
-The right one is a **volume** calculation. Netdata calculated the total of the last 86.400 seconds (a day), which gives `kilobits`, then divided it by 8 to make it KB, then by 1024 to make it MB and then by 1024 to make it GB (8 × 1024 × 1024 = 8.388.608, which is exactly the `divide=8388608` in the URL above). Calculations like this are quite accurate, since for every value collected, every second, Netdata interpolates it to the second boundary using microsecond calculations.
-
-Let's see a few more badge examples (they come from the [Netdata registry](https://github.com/netdata/netdata/blob/master/registry/README.md)):
-
-- **cpu usage of user `root`** (you can pick any user; 100% = 1 core). This will be `green <10%`, `yellow <20%`, `orange <50%`, `blue <100%` (1 core), `red` otherwise (you define thresholds and colors on the URL).
-
- <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img></a> <a href="https://registry.my-netdata.io/#apps_cpu"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&after=-3600&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20average%20cpu%20last%20hour&units=%25"></img></a>
-
-- **mysql queries per second**
-
- <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&label=mysql%20queries%20now&value_color=red&units=%5Cs"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-3600&options=unaligned&group=sum&label=mysql%20queries%20this%20hour&value_color=green&units=null"></img></a> <a href="https://registry.my-netdata.io/#mysql_local"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.queries&dimensions=questions&after=-86400&options=unaligned&group=sum&label=mysql%20queries%20today&value_color=blue&units=null"></img></a>
-
- niche ones: **mysql SELECT statements with JOIN, which did full table scans**:
-
- <a href="https://registry.my-netdata.io/#mysql_local_issues"><img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=mysql_local.join_issues&dimensions=scan&after=-3600&label=full%20table%20scans%20the%20last%20hour&value_color=orange&group=sum&units=null"></img></a>
-
----
-
-> So, every single line on the charts of a [Netdata dashboard](http://london.my-netdata.io/), can become a badge and this badge can calculate **average**, **min**, **max**, or **volume** for any time-frame! And you can also vary the badge color using conditions on the calculated value.
-
----
-
-## How to create badges
-
-The basic URL is `http://your.netdata:19999/api/v1/badge.svg?option1&option2&option3&...`.
-
-Here is what you can put for `options` (these are standard Netdata API options):
-
-- `chart=CHART.NAME`
-
- The chart to get the values from.
-
- **This is the only parameter required** and with just this parameter, Netdata will return the sum of the latest values of all chart dimensions.
-
- Example:
-
-```html
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img>
- </a>
-```
-
- Which produces this:
-
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu"></img>
- </a>
-
-- `alarm=NAME`
-
- Render the current value and status of an alert linked to the chart. This option can be ignored if the badge to be generated is not related to an alert.
-
- The current value of the alert will be rendered. The color of the badge will indicate the status of the alert.
-
- For alert badges, **both `chart` and `alarm` parameters are required**.
-
-- `dimensions=DIMENSION1|DIMENSION2|...`
-
- The dimensions of the chart to use. If you don't set any dimension, all will be used. When multiple dimensions are used, Netdata will sum their values. You can append `options=absolute` if you want this sum to convert all values to positive before adding them.
-
- Pipes in HTML have to be escaped with `%7C`.
-
- Example:
-
-```html
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img>
- </a>
-```
-
- Which produces this:
-
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&dimensions=system%7Cnice"></img>
- </a>
-
-- `before=SECONDS` and `after=SECONDS`
-
- The timeframe. These can be absolute unix timestamps, or numbers of seconds relative to now. By default `before=0` and `after=-1` (1 second in the past).
-
- To get the last minute set `after=-60`. This will give the average of the last complete minute (XX:XX:00 - XX:XX:59).
-
- To get the max of the last hour set `after=-3600&group=max`. This will give the maximum value of the last complete hour (XX:00:00 - XX:59:59).
-
- Example:
-
-```html
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img>
- </a>
-```
-
- Which produces the average of last complete minute (XX:XX:00 - XX:XX:59):
-
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60"></img>
- </a>
-
- While this is the previous minute (one minute before the last one, again aligned XX:XX:00 - XX:XX:59):
-
-```html
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img>
- </a>
-```
-
- It produces this:
-
- <a href="#">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&before=-60&after=-60"></img>
- </a>
-
-- `group=min` or `group=max` or `group=average` (the default) or `group=sum` or `group=incremental-sum`
-
- If Netdata has to reduce (aggregate) the data to calculate the value, this selects the aggregation method to use.
-
- - `max` will find the max value for the timeframe. This works on both positive and negative dimensions. It will find the most extreme value.
-
- - `min` will find the min value for the timeframe. This works on both positive and negative dimensions. It will find the number closest to zero.
-
- - `average` will calculate the average value for the timeframe.
-
- - `sum` will sum all the values for the timeframe. This is nice for finding the volume of dimensions for a timeframe. So if you have a dimension that reports `X per second`, you can find the volume of the dimension in a timeframe, by adding its values in that timeframe.
-
- - `incremental-sum` will sum the difference of each value to its next. Let's assume you have a dimension that does not measure the rate of something, but the absolute value of it. So it has values like this "1, 5, 3, 7, 4". `incremental-sum` will calculate the difference of adjacent values. In this example, they will be `(5 - 1) + (3 - 5) + (7 - 3) + (4 - 7) = 3` (which is equal to the last value minus the first = 4 - 1).
-
-- `options=opt1|opt2|opt3|...`
-
- These fine-tune various options of the API. Here is what you can use for badges (the API has more options, but only these are useful for badges):
-
- - `percentage`, instead of returning a value, calculate the percentage of the sum of the values of the selected dimensions (selected sum / total sum * 100). This also sets the units to `%`.
-
- - `absolute` or `abs`, turn all values positive and then sum them.
-
- - `display_absolute` or `display-absolute`, to use the signed value during color calculation, but display the absolute value on the badge.
-
- - `min2max`, when multiple dimensions are given, do not sum them, but take their `max - min`.
-
- `unaligned`, when data are reduced / aggregated (e.g. the request is about the average of the last minute, or hour), Netdata by default aligns them so that the charts will have a constant shape (so the average per minute always returns XX:XX:00 - XX:XX:59). With the `unaligned` option, Netdata will aggregate data without any alignment, so if the request is for 60 seconds, it will aggregate the latest 60 seconds of collected data.
-
-These are options dedicated to badges:
-
-- `label=TEXT`
-
- The label of the badge.
-
-- `units=TEXT`
-
- The units of the badge. If you want to put a `/`, please put a `\` instead. This is because Netdata allows badge parameters to be given as a path in the URL, instead of a query string. You can also use `null` or `empty` to show the badge without any units.
-
- The units `seconds`, `minutes` and `hours` trigger special formatting. The value has to be in this unit, and Netdata will automatically change it to show a prettier duration.
-
-- `multiply=NUMBER`
-
- Multiply the value by this number. The default is `1`.
-
-- `divide=NUMBER`
-
- Divide the value by this number. The default is `1`.
-
-- Color customization parameters
-
- The following parameters specify colors of each individual part of the badge. Each parameter is documented in detail
- below.
-
- | Area of badge | Background color parameter | Text color parameter |
- | ---: | :------------------------: | :------------------: |
- | Label (left) part | `label_color` | `text_color_lbl` |
- | Value (right) part | `value_color` | `text_color_val` |
-
- - `label_color=COLOR`
-
- The color of the label (the left part). You can use any HTML color in `RGB` or `RRGGBB` hex notation (without
- the `#` character at the beginning). Additionally, you can use one of the following predefined colors (and you
- can use them by their name):
-
- - `green`
- - `brightgreen`
- - `yellow`
- - `yellowgreen`
- - `orange`
- - `red`
- - `blue`
- - `grey`
- - `gray`
- - `lightgrey`
- - `lightgray`
-
- These colors are taken from <https://github.com/badges/shields>, which makes them compatible with standard
- badges.
-
- - `value_color=COLOR:null|COLOR<VALUE|COLOR>VALUE|COLOR>=VALUE|COLOR<=VALUE|...`
-
- You can add a pipe delimited list of conditions to pick the value color. The first matching (left to right) will
- be used.
-
- Example: `value_color=grey:null|green<10|yellow<100|orange<1000|blue<10000|red`
-
- The above will set `grey` if no value exists (not collected within the `gap when lost iterations above` in
- `netdata.conf` for the chart), `green` if the value is less than 10, `yellow` if the value is less than 100, and
- so on. Netdata will use `red` if no other conditions match. Only integers are supported as values.
-
- The supported operators are `<`, `>`, `<=`, `>=`, `=` (or `:`), and `!=` (or `<>`).
-
- You can also use the same syntax as the `label_color` parameter to define each of these colors. You can
- reference a predefined color by name or `RGB`/`RRGGBB` hex notation.
-
- - `text_color_lbl=RGB` or `text_color_lbl=RRGGBB` or `text_color_lbl=color_by_name`
-
- This value specifies the font color for the left/label side of the badge. The syntax is the same as the
- `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color.
-
- `text_color_val=RGB` or `text_color_val=RRGGBB` or `text_color_val=color_by_name`
-
- This value specifies the font color for the right/value side of the badge. The syntax is the same as the
- `label_color` parameter. If not given, or given with an empty value, Netdata will use the default color.
-
-- `precision=NUMBER`
-
- The number of decimal digits of the value. By default Netdata will add:
-
- - no decimal digits for values > 1000
- - 1 decimal digit for values > 100
- - 2 decimal digits for values > 1
- - 3 decimal digits for values > 0.1
- - 4 decimal digits for values \<= 0.1
-
- Using the `precision=NUMBER` you can set your preference per badge.
-
-- `scale=XXX`
-
- This option scales the svg image. It accepts values greater than or equal to 100 (100% is the default scale). For example, let's get a few different sizes:
-
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=100"></img> original<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=125"></img> `scale=125`<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=150"></img> `scale=150`<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=175"></img> `scale=175`<br/>
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=system.cpu&after=-60&scale=200"></img> `scale=200`
-
-- `fixed_width_lbl=NUMBER` and `fixed_width_val=NUMBER`
-
- This parameter overrides auto-sizing of badges and displays them at fixed widths. `fixed_width_lbl` determines the size of the label's left side (label/name). `fixed_width_val` determines the size of the label's right side (value). You must set both parameters together, or they will be ignored.
-
- You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the given space, it will be clipped.
-
- The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.
-
-- `refresh=auto` or `refresh=SECONDS`
-
- This option enables auto-refreshing of images. Netdata will send the HTTP header `Refresh: SECONDS` to the web browser, thus requesting automatic refresh of the images at regular intervals.
-
- `auto` will calculate the proper `SECONDS` to avoid unnecessary refreshes. If `SECONDS` is zero, this feature is disabled (it is also disabled by default).
-
- Auto-refreshing like this works only if you access the badge directly. So, you may have to put it in an `embed` or `iframe` for it to be auto-refreshed. Use something like this:
-
-```html
-<embed src="BADGE_URL" type="image/svg+xml" height="20" />
-```
-
- Another way is to use javascript to auto-refresh them. You can auto-refresh all the Netdata badges on a page using javascript. You have to add a class to all the Netdata badges, like this `<img class="netdata-badge" src="..."/>`. Then add this javascript code to your page (it requires jquery):
-
-```html
-<script>
- var NETDATA_BADGES_AUTOREFRESH_SECONDS = 5;
- function refreshNetdataBadges() {
- var now = new Date().getTime().toString();
- $('.netdata-badge').each(function() {
- this.src = this.src.replace(/\&_=\d*/, '') + '&_=' + now;
- });
- setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
- }
- setTimeout(refreshNetdataBadges, NETDATA_BADGES_AUTOREFRESH_SECONDS * 1000);
-</script>
-```
-
-A more advanced badge refresh method is to include `http://your.netdata.ip:19999/refresh-badges.js` in your page.
-
----
-
-## Escaping URLs
-
-Keep in mind that if you add badge URLs to your HTML pages you have to escape the special characters:
-
-|character|name|escape sequence|
-|:-------:|:--:|:-------------:|
-|` `|space (in labels and units)|`%20`|
-|`#`|hash (for colors)|`%23`|
-|`%`|percent (in units)|`%25`|
-|`<`|less than|`%3C`|
-|`>`|greater than|`%3E`|
-|`\`|backslash (when you need a `/`)|`%5C`|
-|`\|`|pipe (delimiting parameters)|`%7C`|
-
-## FAQ
-
-#### Is it fast?
-
-On modern hardware, Netdata can generate about **2.000 badges per second per core** before you notice any delays. It generates a badge in about half a millisecond!
-
-Of course these timings are for badges that use recent data. If you need badges that do calculations over long durations (a day, or more), timings will differ. Netdata logs its timings in its `access.log`, so take a look there before adding a heavy badge to a busy web site. Of course, you can cache such badges, or have a cron job fetch them from Netdata and save them on your web server at regular intervals.
-
-#### Embedding badges in GitHub
-
-You have 2 options:
-- SVG images with markdown
-- SVG images with HTML (directly in .md files)
-
-For example, this is the cpu badge shown above:
-
-- Markdown example:
-
-```md
-[![A nice name](https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25)](https://registry.my-netdata.io/#apps_cpu)
-```
-
-- HTML example:
-
-```html
-<a href="https://registry.my-netdata.io/#apps_cpu">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img>
-</a>
-```
-
-Both produce this:
-
-<a href="https://registry.my-netdata.io/#apps_cpu">
- <img src="https://registry.my-netdata.io/api/v1/badge.svg?chart=users.cpu&dimensions=root&value_color=grey:null%7Cgreen%3C10%7Cyellow%3C20%7Corange%3C50%7Cblue%3C100%7Cred&label=root%20user%20cpu%20now&units=%25"></img>
-</a>
-
-#### Auto-refreshing badges in GitHub
-
-Unfortunately it cannot be done. GitHub fetches all the images using a proxy and rewrites all the URLs to be served by the proxy.
-
-You can refresh them from your browser console though. Press F12 to open the web browser console (switch to the console too), paste the following and press enter. They will refresh:
-
-```js
-var len = document.images.length; while(len--) { document.images[len].src = document.images[len].src.replace(/\?cacheBuster=\d*/, "") + "?cacheBuster=" + new Date().getTime().toString(); };
-```
-
-
diff --git a/web/api/exporters/Makefile.am b/web/api/exporters/Makefile.am
deleted file mode 100644
index 06fda51ef..000000000
--- a/web/api/exporters/Makefile.am
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- shell \
- prometheus \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/exporters/README.md b/web/api/exporters/README.md
deleted file mode 100644
index 4be567691..000000000
--- a/web/api/exporters/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-<!--
-title: "Exporters"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/README.md
-sidebar_label: "Exporters"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api"
--->
-
-# Exporters
-
-TBD
-
-
diff --git a/web/api/exporters/prometheus/Makefile.am b/web/api/exporters/prometheus/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/exporters/prometheus/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/exporters/prometheus/README.md b/web/api/exporters/prometheus/README.md
deleted file mode 100644
index 5e0f98c16..000000000
--- a/web/api/exporters/prometheus/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-<!--
-title: "Prometheus exporter"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/prometheus/README.md
-sidebar_label: "Prometheus exporter"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Exporters"
--->
-
-# Prometheus exporter
-
-Read the Prometheus exporter documentation: [Using Netdata with Prometheus](https://github.com/netdata/netdata/blob/master/exporting/prometheus/README.md).
-
-
diff --git a/web/api/exporters/shell/Makefile.am b/web/api/exporters/shell/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/exporters/shell/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/exporters/shell/README.md b/web/api/exporters/shell/README.md
deleted file mode 100644
index 7e28829a7..000000000
--- a/web/api/exporters/shell/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-<!--
-title: "Shell exporter"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/shell/README.md
-sidebar_label: "Shell exporter"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Exporters"
--->
-
-# Shell exporter
-
-Shell scripts can now query Netdata:
-
-```sh
-eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
-```
-
-After this command, all the Netdata metrics are exposed to the shell. Check:
-
-```sh
-# source the metrics
-eval "$(curl -s 'http://localhost:19999/api/v1/allmetrics')"
-
-# let's see if there are variables exposed by Netdata for system.cpu
-set | grep "^NETDATA_SYSTEM_CPU"
-
-NETDATA_SYSTEM_CPU_GUEST=0
-NETDATA_SYSTEM_CPU_GUEST_NICE=0
-NETDATA_SYSTEM_CPU_IDLE=95
-NETDATA_SYSTEM_CPU_IOWAIT=0
-NETDATA_SYSTEM_CPU_IRQ=0
-NETDATA_SYSTEM_CPU_NICE=0
-NETDATA_SYSTEM_CPU_SOFTIRQ=0
-NETDATA_SYSTEM_CPU_STEAL=0
-NETDATA_SYSTEM_CPU_SYSTEM=1
-NETDATA_SYSTEM_CPU_USER=4
-NETDATA_SYSTEM_CPU_VISIBLETOTAL=5
-
-# let's see the total cpu utilization of the system
-echo ${NETDATA_SYSTEM_CPU_VISIBLETOTAL}
-5
-
-# what about alerts?
-set | grep "^NETDATA_ALARM_SYSTEM_SWAP_"
-NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS=CLEAR
-NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_VALUE=51
-
-# let's get the current status of the alert 'used swap'
-echo ${NETDATA_ALARM_SYSTEM_SWAP_USED_SWAP_STATUS}
-CLEAR
-
-# is it fast?
-time curl -s 'http://localhost:19999/api/v1/allmetrics' >/dev/null
-
-real 0m0,070s
-user 0m0,000s
-sys 0m0,007s
-
-# it is...
-# 0.07 seconds for curl to be loaded, connect to Netdata and fetch the response back...
-```
-
-The `_VISIBLETOTAL` variable sums up all the dimensions of each chart.
-
-The format of the variables is:
-
-```sh
-NETDATA_${chart_id^^}_${dimension_id^^}="${value}"
-```
-
-The value is rounded to the closest integer, since shell scripts cannot process decimal numbers.
-
-
diff --git a/web/api/formatters/Makefile.am b/web/api/formatters/Makefile.am
deleted file mode 100644
index 11f239ccf..000000000
--- a/web/api/formatters/Makefile.am
+++ /dev/null
@@ -1,15 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- csv \
- json \
- ssv \
- value \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md
deleted file mode 100644
index ddc70d90f..000000000
--- a/web/api/formatters/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-<!--
-title: "Query formatting"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/README.md
-sidebar_label: "Query formatting"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Formatters"
--->
-
-# Query formatting
-
-API data queries need to be formatted before being returned to the caller.
-Using API parameters, the caller may define the format they wish to get back.
-
-The following formats are supported:
-
-| format|module|content type|description|
-|:----:|:----:|:----------:|:----------|
-| `array`|[ssv](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md)|application/json|a JSON array|
-| `csv`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
-| `csvjsonarray`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
-| `datasource`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a Google Visualization Provider `datasource` javascript callback|
-| `datatable`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a Google `datatable`|
-| `html`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/html|an html table|
-| `json`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a JSON object|
-| `jsonp`|[json](https://github.com/netdata/netdata/blob/master/web/api/formatters/json/README.md)|application/json|a JSONP javascript callback|
-| `markdown`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/plain|a markdown table|
-| `ssv`|[ssv](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md)|text/plain|a space separated list of values|
-| `ssvcomma`|[ssv](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md)|text/plain|a comma separated list of values|
-| `tsv`|[csv](https://github.com/netdata/netdata/blob/master/web/api/formatters/csv/README.md)|text/plain|a TAB delimited `csv` (MS Excel flavor)|
-
-For examples of each format, check the respective module documentation.
-
-## Metadata with the `jsonwrap` option
-
-All data queries can be encapsulated in a JSON object carrying metadata about the query and the results.
-
-This is done by adding `options=jsonwrap` to the API URL (if there are other `options`, append
-`,jsonwrap` to the existing ones).
-
-Here is an example of such an object:
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&points=6&group=average&format=csv&options=nonzero,jsonwrap'
-{
- "api": 1,
- "id": "system.cpu",
- "name": "system.cpu",
- "view_update_every": 600,
- "update_every": 1,
- "first_entry": 1540387074,
- "last_entry": 1540647070,
- "before": 1540647000,
- "after": 1540644000,
- "dimension_names": ["steal", "softirq", "user", "system", "iowait"],
- "dimension_ids": ["steal", "softirq", "user", "system", "iowait"],
- "latest_values": [0, 0.2493766, 1.745636, 0.4987531, 0],
- "view_latest_values": [0.0158314, 0.0516506, 0.866549, 0.7196127, 0.0050002],
- "dimensions": 5,
- "points": 6,
- "format": "csv",
- "result": "time,steal,softirq,user,system,iowait\n2018-10-27 13:30:00,0.0158314,0.0516506,0.866549,0.7196127,0.0050002\n2018-10-27 13:20:00,0.0149856,0.0529183,0.8673155,0.7121144,0.0049979\n2018-10-27 13:10:00,0.0137501,0.053315,0.8578097,0.7197613,0.0054209\n2018-10-27 13:00:00,0.0154252,0.0554688,0.899432,0.7200638,0.0067252\n2018-10-27 12:50:00,0.0145866,0.0495922,0.8404341,0.7011141,0.0041688\n2018-10-27 12:40:00,0.0162366,0.0595954,0.8827475,0.7020573,0.0041636\n",
- "min": 0,
- "max": 0
-}
-```
-
-## Downloading data query result files
-
-Following the [Google Visualization Provider guidelines](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source),
-Netdata supports parsing `tqx` options.
-
-Using these options, any Netdata data query can instruct the web browser to download
-the result and save it under a given filename.
-
-For example, to download a CSV file with CPU utilization of the last hour,
-[click here](https://registry.my-netdata.io/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:system+cpu+utilization+of+the+last_hour.csv).
-
-This is done by appending `&tqx=outFileName:FILENAME` to any data query.
-The output will be in the format given with `&format=`.
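-
-Since the filename is delivered through the response headers, a command-line sketch (with `NODE` as
-a placeholder for your agent's address) can rely on curl's `-OJ` to save the file under the
-server-suggested name:
-
-```bash
-# curl -sOJ 'http://NODE:19999/api/v1/data?chart=system.cpu&after=-3600&format=csv&options=nonzero&tqx=outFileName:cpu.csv'
-```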
-
-
diff --git a/web/api/formatters/csv/Makefile.am b/web/api/formatters/csv/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/formatters/csv/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md
deleted file mode 100644
index 4585710b4..000000000
--- a/web/api/formatters/csv/README.md
+++ /dev/null
@@ -1,148 +0,0 @@
-<!--
-title: "CSV formatter"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/csv/README.md
-sidebar_label: "CSV formatter"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Formatters"
--->
-
-# CSV formatter
-
-The CSV formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) in the following formats:
-
-| format|content type|description|
-| :----:|:----------:|:----------|
-| `csv`|text/plain|a text table, comma separated, with a header line (dimension names) and `\r\n` at the end of the lines|
-| `csvjsonarray`|application/json|a JSON array, with each row as another array (the first row has the dimension names)|
-| `tsv`|text/plain|like `csv` but TAB is used instead of comma to separate values (MS Excel flavor)|
-| `html`|text/html|an html table|
-| `markdown`|text/plain|markdown table|
-
-In all formats the date and time is the first column.
-
-The CSV formatter respects the following API `&options=`:
-
-| option|supported|description|
-|:----:|:-------:|:----------|
-| `nonzero`|yes|to return only the dimensions that have at least a non-zero value|
-| `flip`|yes|to return the rows older to newer (the default is newer to older)|
-| `seconds`|yes|to return the date and time as a unix timestamp|
-| `ms`|yes|to return the date and time as a unix timestamp in milliseconds|
-| `percent`|yes|to replace all values with their percentage over the row total|
-| `abs`|yes|to turn all values positive|
-| `null2zero`|yes|to replace gaps with zeros (the default prints the string `null`)|
-
-## Examples
-
-Get the system total bandwidth for all physical network interfaces, over the last hour,
-in 6 rows (one for every 10 minutes), in `csv` format:
-
-Netdata always returns bandwidth in `kilobits`.
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.net&format=csv&after=-3600&group=sum&points=6&options=abs'
-time,received,sent
-2018-10-26 23:50:00,90214.67847,215137.79762
-2018-10-26 23:40:00,90126.32286,238587.57522
-2018-10-26 23:30:00,86061.22688,213389.23526
-2018-10-26 23:20:00,85590.75164,206129.01608
-2018-10-26 23:10:00,83163.30691,194311.77384
-2018-10-26 23:00:00,85167.29657,197538.07773
-```
-
----
-
-Get the max RAM used by the SQL server and any cron jobs, over the last hour, in 2 rows (one for every 30
-minutes), in `tsv` format, and format the date and time as a unix timestamp:
-
-Netdata always returns memory in `MB`.
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=apps.mem&format=tsv&after=-3600&group=max&points=2&options=nonzero,seconds&dimensions=sql,cron'
-time sql cron
-1540598400 61.95703 0.25
-1540596600 61.95703 0.25
-```
-
----
-
-Get an HTML table of the last 4 values (4 seconds) of system CPU utilization:
-
-Netdata always returns CPU utilization as `%`.
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=html&after=-4&options=nonzero'
-<html>
-<center>
-<table border="0" cellpadding="5" cellspacing="5">
-<tr><td>time</td><td>softirq</td><td>user</td><td>system</td></tr>
-<tr><td>2018-10-27 00:16:07</td><td>0.25</td><td>1</td><td>0.75</td></tr>
-<tr><td>2018-10-27 00:16:06</td><td>0</td><td>1.0025063</td><td>0.5012531</td></tr>
-<tr><td>2018-10-27 00:16:05</td><td>0</td><td>1</td><td>0.75</td></tr>
-<tr><td>2018-10-27 00:16:04</td><td>0</td><td>1.0025063</td><td>0.7518797</td></tr>
-</table>
-</center>
-</html>
-```
-
-This is how it looks when rendered by a web browser:
-
-![image](https://user-images.githubusercontent.com/2662304/47597887-bafbf480-d99c-11e8-864a-d880bb8d2e5b.png)
-
----
-
-Get a JSON array with the average bandwidth rate of the mysql server, over the last hour, in 6 values
-(one every 10 minutes), and return the date and time in milliseconds:
-
-Netdata always returns bandwidth rates in `kilobits/s`.
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=csvjsonarray&after=-3600&points=6&group=average&options=abs,ms'
-[
-["time","in","out"],
-[1540599600000,0.7499986,120.2810185],
-[1540599000000,0.7500019,120.2815509],
-[1540598400000,0.7499999,120.2812319],
-[1540597800000,0.7500044,120.2819634],
-[1540597200000,0.7499968,120.2807337],
-[1540596600000,0.7499988,120.2810527]
-]
-```
-
----
-
-Get the number of processes started per minute, for the last 10 minutes, in `markdown` format:
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.forks&format=markdown&after=-600&points=10&group=sum'
-time | started
-:---: |:---:
-2018-10-27 03:52:00| 245.1706149
-2018-10-27 03:51:00| 152.6654636
-2018-10-27 03:50:00| 163.1755789
-2018-10-27 03:49:00| 176.1574766
-2018-10-27 03:48:00| 178.0137076
-2018-10-27 03:47:00| 183.8306543
-2018-10-27 03:46:00| 264.1635621
-2018-10-27 03:45:00| 205.001551
-2018-10-27 03:44:00| 7026.9852167
-2018-10-27 03:43:00| 205.9904794
-```
-
-And this is how it looks when formatted:
-
-| time | started |
-|:--:|:-----:|
-| 2018-10-27 03:52:00 | 245.1706149 |
-| 2018-10-27 03:51:00 | 152.6654636 |
-| 2018-10-27 03:50:00 | 163.1755789 |
-| 2018-10-27 03:49:00 | 176.1574766 |
-| 2018-10-27 03:48:00 | 178.0137076 |
-| 2018-10-27 03:47:00 | 183.8306543 |
-| 2018-10-27 03:46:00 | 264.1635621 |
-| 2018-10-27 03:45:00 | 205.001551 |
-| 2018-10-27 03:44:00 | 7026.9852167 |
-| 2018-10-27 03:43:00 | 205.9904794 |
-
-
diff --git a/web/api/formatters/json/Makefile.am b/web/api/formatters/json/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/formatters/json/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md
deleted file mode 100644
index bc70aec02..000000000
--- a/web/api/formatters/json/README.md
+++ /dev/null
@@ -1,160 +0,0 @@
-<!--
-title: "JSON formatter"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/json/README.md
-sidebar_label: "JSON formatter"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Formatters"
--->
-
-# JSON formatter
-
-The JSON formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) in the following formats:
-
-| format | content type | description|
-|:----:|:----------:|:----------|
-| `json` | application/json | return the query result as a json object|
-| `jsonp` | application/json | return the query result as a JSONP javascript callback|
-| `datatable` | application/json | return the query result as a Google `datatable`|
-| `datasource` | application/json | return the query result as a Google Visualization Provider `datasource` javascript callback|
-
-The JSON formatter respects the following API `&options=`:
-
-| option | supported | description|
-|:----:|:-------:|:----------|
-| `google_json` | yes | enable the Google flavor of JSON (using double quotes for strings and `Date()` function for dates)|
-| `objectrows` | yes | return each row as an object, instead of an array|
-| `nonzero` | yes | to return only the dimensions that have at least a non-zero value|
-| `flip` | yes | to return the rows older to newer (the default is newer to older)|
-| `seconds` | yes | to return the date and time as a unix timestamp|
-| `ms` | yes | to return the date and time as a unix timestamp in milliseconds|
-| `percent` | yes | to replace all values with their percentage over the row total|
-| `abs` | yes | to turn all values positive|
-| `null2zero` | yes | to replace gaps with zeros (the default prints the string `null`)|
-
-## Examples
-
-To show the differences between each format, in the following examples we query the same
-chart (having just one dimension called `active`), changing only the query `format` and its `options`.
-
-> Using `format=json` and `options=`
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options='
-{
- "labels": ["time", "active"],
- "data":
- [
- [ 1540644600, 224.2516667],
- [ 1540644000, 229.29],
- [ 1540643400, 222.41],
- [ 1540642800, 226.6816667],
- [ 1540642200, 246.4083333],
- [ 1540641600, 241.0966667]
- ]
-}
-```
-
-> Using `format=json` and `options=objectrows`
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows'
-{
- "labels": ["time", "active"],
- "data":
- [
- { "time": 1540644600, "active": 224.2516667},
- { "time": 1540644000, "active": 229.29},
- { "time": 1540643400, "active": 222.41},
- { "time": 1540642800, "active": 226.6816667},
- { "time": 1540642200, "active": 246.4083333},
- { "time": 1540641600, "active": 241.0966667}
- ]
-}
-```
-
-> Using `format=json` and `options=objectrows,google_json`
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=json&options=objectrows,google_json'
-{
- "labels": ["time", "active"],
- "data":
- [
- { "time": new Date(2018,9,27,12,50,0), "active": 224.2516667},
- { "time": new Date(2018,9,27,12,40,0), "active": 229.29},
- { "time": new Date(2018,9,27,12,30,0), "active": 222.41},
- { "time": new Date(2018,9,27,12,20,0), "active": 226.6816667},
- { "time": new Date(2018,9,27,12,10,0), "active": 246.4083333},
- { "time": new Date(2018,9,27,12,0,0), "active": 241.0966667}
- ]
-}
-```
-
-> Using `format=jsonp` and `options=`
-
-```bash
-curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=jsonp&options='
-callback({
- "labels": ["time", "active"],
- "data":
- [
- [ 1540645200, 235.885],
- [ 1540644600, 224.2516667],
- [ 1540644000, 229.29],
- [ 1540643400, 222.41],
- [ 1540642800, 226.6816667],
- [ 1540642200, 246.4083333]
- ]
-});
-```
-
-> Using `format=datatable` and `options=`
-
-```bash
-curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datatable&options='
-{
- "cols":
- [
- {"id":"","label":"time","pattern":"","type":"datetime"},
- {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
- {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}},
- {"id":"","label":"active","pattern":"","type":"number"}
- ],
- "rows":
- [
- {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]},
- {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]},
- {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]},
- {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]},
- {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]},
- {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]}
- ]
-}
-```
-
-> Using `format=datasource` and `options=`
-
-```bash
-curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&after=-3600&points=6&group=average&format=datasource&options='
-google.visualization.Query.setResponse({version:'0.6',reqId:'0',status:'ok',sig:'1540645368',table:{
- "cols":
- [
- {"id":"","label":"time","pattern":"","type":"datetime"},
- {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotation"}},
- {"id":"","label":"","pattern":"","type":"string","p":{"role":"annotationText"}},
- {"id":"","label":"active","pattern":"","type":"number"}
- ],
- "rows":
- [
- {"c":[{"v":"Date(2018,9,27,13,0,0)"},{"v":null},{"v":null},{"v":235.885}]},
- {"c":[{"v":"Date(2018,9,27,12,50,0)"},{"v":null},{"v":null},{"v":224.2516667}]},
- {"c":[{"v":"Date(2018,9,27,12,40,0)"},{"v":null},{"v":null},{"v":229.29}]},
- {"c":[{"v":"Date(2018,9,27,12,30,0)"},{"v":null},{"v":null},{"v":222.41}]},
- {"c":[{"v":"Date(2018,9,27,12,20,0)"},{"v":null},{"v":null},{"v":226.6816667}]},
- {"c":[{"v":"Date(2018,9,27,12,10,0)"},{"v":null},{"v":null},{"v":246.4083333}]}
- ]
-}});
-```
-
-
diff --git a/web/api/formatters/ssv/Makefile.am b/web/api/formatters/ssv/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/formatters/ssv/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md
deleted file mode 100644
index 434d56721..000000000
--- a/web/api/formatters/ssv/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-<!--
-title: "SSV formatter"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/ssv/README.md
-sidebar_label: "SSV formatter"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Formatters"
--->
-
-# SSV formatter
-
-The SSV formatter sums all dimensions in [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md)
-into a single value and returns a list of such values, showing how the sum changes through time.
-
-It supports the following formats:
-
-| format | content type | description |
-|:----:|:----------:|:----------|
-| `ssv` | text/plain | a space separated list of values |
-| `ssvcomma` | text/plain | a comma separated list of values |
-| `array` | application/json | a JSON array |
-
-The SSV formatter respects the following API `&options=`:
-
-| option | supported | description |
-| :----:|:-------:|:----------|
-| `nonzero` | yes | to return only the dimensions that have at least a non-zero value |
-| `flip` | yes | to return the numbers older to newer (the default is newer to older) |
-| `percent` | yes | to replace all values with their percentage over the row total |
-| `abs` | yes | to turn all values positive, before using them |
-| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions) |
-
-## Examples
-
-Get the average system CPU utilization of the last hour, in 6 values (one every 10 minutes):
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&format=ssv&after=-3600&points=6&group=average'
-1.741352 1.6800467 1.769411 1.6761112 1.629862 1.6807968
-```
-
----
-
-Get the total mysql bandwidth (in + out) for the last hour, in 6 values (one every 10 minutes):
-
-Netdata returns bandwidth in `kilobits`.
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=mysql_local.net&format=ssvcomma&after=-3600&points=6&group=sum&options=abs'
-72618.7936215,72618.778889,72618.788084,72618.9195918,72618.7760612,72618.6712421
-```
-
----
-
-Get the web server max connections for the last hour, in 12 values (one every 5 minutes)
-in a JSON array:
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=nginx_local.connections&format=array&after=-3600&points=12&group=max'
-[278,258,268,239,259,260,243,266,278,318,264,258]
-```
-
-
diff --git a/web/api/formatters/value/Makefile.am b/web/api/formatters/value/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/formatters/value/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md
deleted file mode 100644
index 5631d8207..000000000
--- a/web/api/formatters/value/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-<!--
-title: "Value formatter"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/value/README.md
-sidebar_label: "Value formatter"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Formatters"
--->
-
-# Value formatter
-
-The Value formatter presents [results of database queries](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md) as a single value.
-
-To calculate the single value to be returned, it sums the values of all dimensions.
-
-The Value formatter respects the following API `&options=`:
-
-| option | supported | description |
-|:----: |:-------: |:---------- |
-| `percent` | yes | to replace all values with their percentage over the row total|
-| `abs` | yes | to turn all values positive, before using them |
-| `min2max` | yes | to return the delta from the minimum value to the maximum value (across dimensions)|
-
-The Value formatter is not exposed by the API by itself.
-Instead, it is used by the [`ssv`](https://github.com/netdata/netdata/blob/master/web/api/formatters/ssv/README.md) formatter
-and [health monitoring queries](https://github.com/netdata/netdata/blob/master/health/README.md).
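-
-For instance, a health `lookup` line in the style of the other query READMEs (the alert and chart
-names below are hypothetical) receives a single value: the sum of all the dimensions matched:
-
-```
- alarm: my_alert
-    on: my_chart
-lookup: average -1m unaligned
-  warn: $this > 1000
-```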
-
-
diff --git a/web/api/health/Makefile.am b/web/api/health/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/health/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/health/README.md b/web/api/health/README.md
deleted file mode 100644
index b8e2c7291..000000000
--- a/web/api/health/README.md
+++ /dev/null
@@ -1,222 +0,0 @@
-<!--
-title: "Health API Calls"
-date: 2020-04-27
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/health/README.md
-sidebar_label: "Health API Calls"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api"
--->
-
-# Health API Calls
-
-## Health Read API
-
-### Enabled Alerts
-
-Netdata enables alerts on demand, i.e. when the chart they should be linked to starts collecting data. So, although many
-more alerts are configured, only the useful ones are enabled.
-
-To get the list of all enabled alerts, open your browser and navigate to `http://NODE:19999/api/v1/alarms?all`,
-replacing `NODE` with the IP address or hostname for your Agent dashboard.
-
-### Raised Alerts
-
-This API call will return the alerts currently in WARNING or CRITICAL state.
-
-`http://NODE:19999/api/v1/alarms`
-
-### Event Log
-
-The size of the alert log is configured in `netdata.conf`. There are two settings: the event history kept in the DB (in seconds), and the in-memory size of the alert log.
-
-```
-[health]
- in memory max health log entries = 1000
- health log history = 432000
-```
-
-The following API call retrieves all entries of the alert log:
-
-`http://NODE:19999/api/v1/alarm_log`
-
-### Alert Log Incremental Updates
-
-`http://NODE:19999/api/v1/alarm_log?after=UNIQUEID`
-
-The above returns all the events in the alert log that occurred after UNIQUEID. To poll incrementally, first call the API without `after=`, remember the last UNIQUEID of the returned set, and pass it back on the next call to receive only the newer events.
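-
-A minimal polling sketch (with `NODE` as a placeholder, and assuming `jq` is available and that each
-log entry carries a `unique_id` field):
-
-```sh
-# first call: fetch the whole log and keep the largest unique id
-LAST=$(curl -Ss "http://NODE:19999/api/v1/alarm_log" | jq 'map(.unique_id) | max')
-# subsequent calls: fetch only the events newer than the one already seen
-curl -Ss "http://NODE:19999/api/v1/alarm_log?after=${LAST}"
-```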
-
-### Alert badges
-
-The following will return an SVG badge of the alert named `NAME`, attached to the chart named `CHART`.
-
-`http://NODE:19999/api/v1/badge.svg?alarm=NAME&chart=CHART`
-
-## Health Management API
-
-Netdata v1.12 and beyond provides a command API to control health checks and notifications at runtime. The feature is especially useful for maintenance periods, during which you would otherwise receive meaningless alerts.
-From Netdata v1.16.0 and beyond, the configuration controlled via the API commands is [persisted across Netdata restarts](#persistence).
-
-Specifically, the API allows you to:
-
-- Disable health checks completely. Alert conditions will not be evaluated at all and no entries will be added to the alert log.
-- Silence alert notifications. Alert conditions will be evaluated, the alerts will appear in the log and the Netdata UI will show the alerts as active, but no notifications will be sent.
-- Disable or Silence specific alerts that match selectors on alert/template name, chart, context, and host.
-
-The API is available by default, but it is protected by an `api authorization token` that is stored in the file you will see in the following entry of `http://NODE:19999/netdata.conf`:
-
-```
-[registry]
- # netdata management api key file = /var/lib/netdata/netdata.api.key
-```
-
-You can access the API via GET requests, passing the token in an `X-Auth-Token` HTTP header, like this:
-
-```
-curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
-```
-
-By default access to the health management API is only allowed from `localhost`. Accessing the API from anything else will return a 403 error with the message `You are not allowed to access this resource.`. You can change permissions by editing the `allow management from` variable in `netdata.conf` within the [web] section. See [web server access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists) for more information.
-
-The command `RESET` just returns Netdata to the default operation, with all health checks and notifications enabled.
-If you've configured and entered your token correctly, you should see the plain text response `All health checks and notifications are enabled`.
-
-### Disable or silence all alerts
-
-If all you need is to temporarily disable all health checks, issue the following before your maintenance period starts:
-
-```sh
-curl "http://NODE:19999/api/v1/manage/health?cmd=DISABLE%20ALL" -H "X-Auth-Token: Mytoken"
-```
-
-The effect of disabling health checks is that the alert criteria are not evaluated at all and nothing is written in the alert log.
-If you want the health checks to keep running but to not receive any notifications during your maintenance period, you can instead use this:
-
-```sh
-curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE%20ALL" -H "X-Auth-Token: Mytoken"
-```
-
-Alerts may then still be raised and logged in Netdata, so you'll be able to see them via the UI.
-
-Regardless of the option you choose, at the end of your maintenance period you revert to the normal state via the `RESET` command.
-
-```sh
- curl "http://NODE:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: Mytoken"
-```
-
-### Disable or silence specific alerts
-
-If you do not wish to disable/silence all alerts, then the `DISABLE ALL` and `SILENCE ALL` commands can't be used.
-Instead, the following commands expect that one or more alert selectors will be added, so that only alerts that match the selectors are disabled or silenced.
-
-- `DISABLE` : Set the mode to disable health checks.
-- `SILENCE` : Set the mode to silence notifications.
-
-You will normally put one of these commands in the same request with your first alert selector, but it's possible to issue them separately as well.
-You will get a warning in the response if a selector was added without a SILENCE/DISABLE command, or vice versa.
-
-Each request can specify a single alert `selector`, with one or more `selection criteria`.
-A single alert will match a `selector` if all selection criteria match the alert.
-You can add as many selectors as you like.
-In essence, the rule is: IF (alert matches all the criteria in selector1 OR all the criteria in selector2 OR ...) THEN apply the DISABLE or SILENCE command.
-
-To clear all selectors and reset the mode to default, use the `RESET` command.
-
-The following example silences notifications for all the alerts with context=load:
-
-```
-curl "http://NODE:19999/api/v1/manage/health?cmd=SILENCE&context=load" -H "X-Auth-Token: Mytoken"
-```
-
-#### Selection criteria
-
-The `selection criteria` are key/value pairs, in the format `key : value`, where value is a Netdata [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). This means that you can create very powerful selectors (you will rarely need more than one or two).
-
-The accepted keys for the `selection criteria` are the following:
-
-- `alarm` : The expression provided will match both `alarm` and `template` names.
-- `chart` : Chart ids/names, as shown on the dashboard. These will match the `on` entry of a configured `alarm`.
-- `context` : Chart context, as shown on the dashboard. These will match the `on` entry of a configured `template`.
-- `hosts` : The hostnames that will need to match.
-
-You can add any of the selection criteria you need on the request, to ensure that only the alerts you are interested in are matched and disabled/silenced. For example, there is no reason to add `hosts: *` if you want the criteria to apply to alerts for all hosts.
-
-Example 1: Disable all health checks for context = `random`
-
-```
-http://NODE:19999/api/v1/manage/health?cmd=DISABLE&context=random
-```
-
-Example 2: Silence all alerts and templates with name starting with `out_of` on host `myhost`
-
-```
-http://NODE:19999/api/v1/manage/health?cmd=SILENCE&alarm=out_of*&hosts=myhost
-```
-
-### List silencers
-
-The command `LIST` was added in Netdata v1.16.0 and returns a JSON object with the current status of the silencers.
-
-```
- curl "http://NODE:19999/api/v1/manage/health?cmd=LIST" -H "X-Auth-Token: Mytoken"
-```
-
-As an example, the following response shows that we have two silencers configured, one for an alert called `samplealert` and one for alerts with context `random` on host `myhost`:
-
-```json
-{
- "all": false,
- "type": "SILENCE",
- "silencers": [
- {
- "alarm": "samplealert"
- },
- {
- "context": "random",
- "hosts": "myhost"
- }
- ]
-}
-```
-
-The response below shows that we have disabled all health checks.
-
-```json
-{
- "all": true,
- "type": "DISABLE",
- "silencers": []
-}
-```
-
-### Responses
-
-- "Auth Error" : Token authentication failed
-- "All alarm notifications are silenced" : Successful response to cmd=SILENCE ALL
-- "All health checks are disabled" : Successful response to cmd=DISABLE ALL
-- "All health checks and notifications are enabled" : Successful response to cmd=RESET
-- "Health checks disabled for alarms matching the selectors" : Added to the response for a cmd=DISABLE
-- "Alarm notifications silenced for alarms matching the selectors" : Added to the response for a cmd=SILENCE
-- "Alarm selector added" : Added to the response when a new selector is added
-- "Invalid key. Ignoring it." : Wrong name of a parameter. Added to the response and ignored.
-- "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command." : Added to the response if a selector is added without a selector-specific command.
-- "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors." : Added to the response if a selector-specific command is issued without a selector.
-
-### Persistence
-
-From Netdata v1.16.0 and beyond, the silencers configuration is persisted to disk and loaded when Netdata starts.
-The JSON string returned by the [LIST command](#list-silencers) is automatically saved to the `silencers file` every time a command alters the silencers configuration.
-The file's location is configurable in `netdata.conf`. The default is shown below:
-
-```
-[health]
- # silencers file = /var/lib/netdata/health.silencers.json
-```
-
-### Further reading
-
-The test script under [tests/health_mgmtapi](https://github.com/netdata/netdata/blob/master/tests/health_mgmtapi/README.md) contains a series of tests that you can either run or read through to understand the various calls and responses better.
-
-
diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c
deleted file mode 100644
index 27d062653..000000000
--- a/web/api/health/health_cmdapi.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-//
-// Created by Christopher on 11/12/18.
-//
-
-#include "health_cmdapi.h"
-
-/**
- * Free Silencers
- *
- * Clean the silencer structure
- *
- * @param t is the structure that will be cleaned.
- */
-void free_silencers(SILENCER *t) {
- if (!t) return;
- if (t->next) free_silencers(t->next);
- netdata_log_debug(D_HEALTH, "HEALTH command API: Freeing silencer %s:%s:%s:%s", t->alarms,
- t->charts, t->contexts, t->hosts);
- simple_pattern_free(t->alarms_pattern);
- simple_pattern_free(t->charts_pattern);
- simple_pattern_free(t->contexts_pattern);
- simple_pattern_free(t->hosts_pattern);
- freez(t->alarms);
- freez(t->charts);
- freez(t->contexts);
- freez(t->hosts);
- freez(t);
- return;
-}
-
-/**
- * Silencers to JSON Entry
- *
- * Fill the buffer with the other values given.
- *
- * @param wb a pointer to the output buffer
- * @param var the json variable
- * @param val the json value
- * @param hasprev 1 if a previous value has already been written, 0 otherwise
- *
- * @return 1 if a value was written, otherwise the value of `hasprev`.
- */
-int health_silencers2json_entry(BUFFER *wb, char* var, char* val, int hasprev) {
- if (val) {
- buffer_sprintf(wb, "%s\n\t\t\t\"%s\": \"%s\"", (hasprev)?",":"", var, val);
- return 1;
- } else {
- return hasprev;
- }
-}
-
-/**
- * Silencer to JSON
- *
- * Write the silencer values using JSON format inside a buffer.
- *
- * @param wb is the buffer to write the silencers.
- */
-void health_silencers2json(BUFFER *wb) {
- buffer_sprintf(wb, "{\n\t\"all\": %s,"
- "\n\t\"type\": \"%s\","
- "\n\t\"silencers\": [",
- (silencers->all_alarms)?"true":"false",
- (silencers->stype == STYPE_NONE)?"None":((silencers->stype == STYPE_DISABLE_ALARMS)?"DISABLE":"SILENCE"));
-
- SILENCER *silencer;
- int i = 0, j = 0;
- for(silencer = silencers->silencers; silencer ; silencer = silencer->next) {
- if(likely(i)) buffer_strcat(wb, ",");
- buffer_strcat(wb, "\n\t\t{");
- j=health_silencers2json_entry(wb, HEALTH_ALARM_KEY, silencer->alarms, j);
- j=health_silencers2json_entry(wb, HEALTH_CHART_KEY, silencer->charts, j);
- j=health_silencers2json_entry(wb, HEALTH_CONTEXT_KEY, silencer->contexts, j);
- j=health_silencers2json_entry(wb, HEALTH_HOST_KEY, silencer->hosts, j);
- j=0;
- buffer_strcat(wb, "\n\t\t}");
- i++;
- }
- if(likely(i)) buffer_strcat(wb, "\n\t");
- buffer_strcat(wb, "]\n}\n");
-}
-
-/**
- * Silencer to FILE
- *
- * Write the silencer buffer to a file.
- * @param wb
- */
-void health_silencers2file(BUFFER *wb) {
- if (wb->len == 0) return;
-
- FILE *fd = fopen(silencers_filename, "wb");
- if(fd) {
- size_t written = (size_t)fprintf(fd, "%s", wb->buffer) ;
- if (written == wb->len ) {
- netdata_log_info("Silencer changes written to %s", silencers_filename);
- }
- fclose(fd);
- return;
- }
- netdata_log_error("Silencer changes could not be written to %s. Error %s", silencers_filename, strerror(errno));
-}
-
-/**
- * Request V1 MGMT Health
- *
- * Function called by api to management the health.
- *
- * @param host main structure with client information!
- * @param w is the structure with all information of the client request.
- * @param url is the url that netdata is working
- *
- * @return It returns 200 on success and another code otherwise.
- */
-int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url) {
- int ret;
- (void) host;
-
- BUFFER *wb = w->response.data;
- buffer_flush(wb);
- wb->content_type = CT_TEXT_PLAIN;
-
- buffer_flush(w->response.data);
-
- //Local instance of the silencer
- SILENCER *silencer = NULL;
- int config_changed = 1;
-
- if (!w->auth_bearer_token) {
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR);
- ret = HTTP_RESP_FORBIDDEN;
- } else {
- netdata_log_debug(D_HEALTH, "HEALTH command API: Comparing secret '%s' to '%s'", w->auth_bearer_token, api_secret);
- if (strcmp(w->auth_bearer_token, api_secret)) {
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_AUTHERROR);
- ret = HTTP_RESP_FORBIDDEN;
- } else {
- while (url) {
- char *value = strsep_skip_consecutive_separators(&url, "&");
- if (!value || !*value) continue;
-
- char *key = strsep_skip_consecutive_separators(&value, "=");
- if (!key || !*key) continue;
- if (!value || !*value) continue;
-
- netdata_log_debug(D_WEB_CLIENT, "%llu: API v1 health query param '%s' with value '%s'", w->id, key, value);
-
- // name and value are now the parameters
- if (!strcmp(key, "cmd")) {
- if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCEALL)) {
- silencers->all_alarms = 1;
- silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCEALL);
- } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLEALL)) {
- silencers->all_alarms = 1;
- silencers->stype = STYPE_DISABLE_ALARMS;
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLEALL);
- } else if (!strcmp(value, HEALTH_CMDAPI_CMD_SILENCE)) {
- silencers->stype = STYPE_SILENCE_NOTIFICATIONS;
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_SILENCE);
- } else if (!strcmp(value, HEALTH_CMDAPI_CMD_DISABLE)) {
- silencers->stype = STYPE_DISABLE_ALARMS;
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_DISABLE);
- } else if (!strcmp(value, HEALTH_CMDAPI_CMD_RESET)) {
- silencers->all_alarms = 0;
- silencers->stype = STYPE_NONE;
- free_silencers(silencers->silencers);
- silencers->silencers = NULL;
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_RESET);
- } else if (!strcmp(value, HEALTH_CMDAPI_CMD_LIST)) {
- w->response.data->content_type = CT_APPLICATION_JSON;
- health_silencers2json(wb);
- config_changed=0;
- }
- } else {
- silencer = health_silencers_addparam(silencer, key, value);
- }
- }
-
- if (likely(silencer)) {
- health_silencers_add(silencer);
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_ADDED);
- if (silencers->stype == STYPE_NONE) {
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_STYPEWARNING);
- }
- }
- if (unlikely(silencers->stype != STYPE_NONE && !silencers->all_alarms && !silencers->silencers)) {
- buffer_strcat(wb, HEALTH_CMDAPI_MSG_NOSELECTORWARNING);
- }
- ret = HTTP_RESP_OK;
- }
- }
- w->response.data = wb;
- buffer_no_cacheable(w->response.data);
- if (ret == HTTP_RESP_OK && config_changed) {
- BUFFER *jsonb = buffer_create(200, &netdata_buffers_statistics.buffers_health);
- health_silencers2json(jsonb);
- health_silencers2file(jsonb);
- buffer_free(jsonb);
- }
-
- return ret;
-}
diff --git a/web/api/health/health_cmdapi.h b/web/api/health/health_cmdapi.h
deleted file mode 100644
index d5309c73f..000000000
--- a/web/api/health/health_cmdapi.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_WEB_HEALTH_SVG_H
-#define NETDATA_WEB_HEALTH_SVG_H 1
-
-#include "libnetdata/libnetdata.h"
-#include "web/server/web_client.h"
-#include "health/health.h"
-
-#define HEALTH_CMDAPI_CMD_SILENCEALL "SILENCE ALL"
-#define HEALTH_CMDAPI_CMD_DISABLEALL "DISABLE ALL"
-#define HEALTH_CMDAPI_CMD_SILENCE "SILENCE"
-#define HEALTH_CMDAPI_CMD_DISABLE "DISABLE"
-#define HEALTH_CMDAPI_CMD_RESET "RESET"
-#define HEALTH_CMDAPI_CMD_LIST "LIST"
-
-#define HEALTH_CMDAPI_MSG_AUTHERROR "Auth Error\n"
-#define HEALTH_CMDAPI_MSG_SILENCEALL "All alarm notifications are silenced\n"
-#define HEALTH_CMDAPI_MSG_DISABLEALL "All health checks are disabled\n"
-#define HEALTH_CMDAPI_MSG_RESET "All health checks and notifications are enabled\n"
-#define HEALTH_CMDAPI_MSG_DISABLE "Health checks disabled for alarms matching the selectors\n"
-#define HEALTH_CMDAPI_MSG_SILENCE "Alarm notifications silenced for alarms matching the selectors\n"
-#define HEALTH_CMDAPI_MSG_ADDED "Alarm selector added\n"
-#define HEALTH_CMDAPI_MSG_STYPEWARNING "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command.\n"
-#define HEALTH_CMDAPI_MSG_NOSELECTORWARNING "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.\n"
-
-int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url);
-
-#include "web/api/web_api_v1.h"
-
-#endif /* NETDATA_WEB_HEALTH_SVG_H */
diff --git a/web/api/ilove/Makefile.am b/web/api/ilove/Makefile.am
deleted file mode 100644
index 7250b045f..000000000
--- a/web/api/ilove/Makefile.am
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- measure-text.js \
- $(NULL)
diff --git a/web/api/queries/Makefile.am b/web/api/queries/Makefile.am
deleted file mode 100644
index 7c4c43520..000000000
--- a/web/api/queries/Makefile.am
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
- average \
- countif \
- des \
- incremental_sum \
- max \
- min \
- sum \
- median \
- percentile \
- ses \
- stddev \
- trimmed_mean \
- $(NULL)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/README.md b/web/api/queries/README.md
deleted file mode 100644
index dacd2900e..000000000
--- a/web/api/queries/README.md
+++ /dev/null
@@ -1,181 +0,0 @@
-# Database queries/lookup
-
-This document explains in detail the options available to retrieve data from the Netdata timeseries database in order to configure alerts, create badges or
-create custom charts.
-
-The Netdata database can be queried with the `/api/v1/data` and `/api/v1/badge.svg` REST API methods. The database is also queried from the `lookup` line
-in an [alert configuration](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md).
-
-Every data query accepts the following parameters:
-
-|name|required|description|
-|:--:|:------:|:----------|
-|`chart`|yes|The chart to be queried.|
-|`points`|no|The number of points to be returned. Netdata can reduce the number of points by applying query grouping methods. If not given, the result will have the same granularity as the database (but see also `gtime`).|
-|`before`|no|The absolute timestamp or the relative (to now) time the query should finish evaluating data. If not given, it defaults to the timestamp of the latest point in the database.|
-|`after`|no|The absolute timestamp or the relative (to `before`) time the query should start evaluating data. if not given, it defaults to the timestamp of the oldest point in the database.|
-|`group`|no|The grouping method to use when reducing the points the database has. If not given, it defaults to `average`.|
-|`gtime`|no|A resampling period to change the units of the metrics (e.g. setting this to `60` will convert `per second` metrics to `per minute`). If not given, it defaults to the granularity of the database.|
-|`options`|no|A bitmap of options that can affect the operation of the query. Only 2 options are used by the query engine: `unaligned` and `percentage`. All the other options are used by the output formatters. The default is to return aligned data.|
-|`dimensions`|no|A simple pattern to filter the dimensions to be queried. The default is to return all the dimensions of the chart.|
-
-## Operation
-
-The query engine works as follows (in this order):
-
-#### Time-frame
-
-`after` and `before` define a time-frame, accepting:
-
-- **absolute timestamps** (unix timestamps, i.e. seconds since epoch).
-
-- **relative timestamps**:
-
- `before` is relative to now and `after` is relative to `before`.
-
- Example: `before=-60&after=-60` evaluates to the time-frame from -120 up to -60 seconds in
- the past, relative to the latest entry of the database of the chart.
-
-The engine verifies that the time-frame requested is available at the database:
-
-- If the requested time-frame overlaps with the database, the excess requested
- will be truncated.
-
-- If the requested time-frame does not overlap with the database, the engine will
- return an empty data set.
-
-At the end of this operation, `after` and `before` are absolute timestamps.
-
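-For example, in the following sketch (the chart name is illustrative), `before=-3600` is one hour
-before now and `after=-600` is 10 minutes before that `before`, so the query covers the 10 minutes
-that ended one hour ago:
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=system.cpu&before=-3600&after=-600&points=10'
-```
-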
-#### Data grouping
-
-Database points grouping is applied when the caller requests a time-frame to be
-expressed with fewer points, compared to what is available at the database.
-
-There are two use cases that enable this feature:
-
-- The caller requests a specific number of `points` to be returned.
-
- For example, for a time-frame of 10 minutes, the database has 600 points (1/sec),
- while the caller requested these 10 minutes to be expressed in 200 points.
-
- This feature is used by Netdata dashboards when you zoom-out the charts.
- The dashboard is requesting the number of points the user's screen has.
- This saves bandwidth and speeds up the browser (fewer points to evaluate for drawing the charts).
-- The caller requests a **re-sampling** of the database, by setting `gtime` to any value
- above the granularity of the chart.
-
-  For example, the chart's units are `requests/sec` and the caller wants `requests/min`.
-
-Using `points` and `gtime` the query engine tries to find a best fit for **database-points**
-vs **result-points** (we call this ratio `group points`). It always tries to keep `group points`
-an integer. Keep in mind the query engine may shift `after` if required. See also the [example](#example).
-
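-For example, assuming a chart with 1-per-second granularity and units of `requests/sec` (the chart
-name below is an assumption), the following sketch asks for the last 10 minutes re-sampled to
-per-minute rates, so the engine uses `group points = 60`:
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=web_log_nginx.requests&after=-600&points=10&gtime=60'
-```
-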
-#### Time-frame Alignment
-
-Alignment is a very important aspect of Netdata queries. Without it, the animated
-charts on the dashboards would constantly [change shape](#example) during incremental updates.
-
-To provide consistent grouping through time, the query engine (by default) aligns
-`after` and `before` to be a multiple of `group points`.
-
-For example, if `group points` is 60 and alignment is enabled, the engine will return
-each point with durations XX:XX:00 - XX:XX:59, matching whole minutes.
-
-To disable alignment, pass `&options=unaligned` to the query.
-
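-For example, both of the following sketches ask for 10 points over the last 10 minutes; the first
-returns buckets that snap to whole minutes, while the second (`unaligned`) starts the buckets at
-the requested timestamps:
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=net.eth0&after=-600&points=10&options=seconds'
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=net.eth0&after=-600&points=10&options=seconds,unaligned'
-```
-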
-#### Query Execution
-
-To execute the query, the engine evaluates all dimensions of the chart, one after another.
-
-The engine does not evaluate dimensions that do not match the [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md)
-given at the `dimensions` parameter, except when `options=percentage` is given (this option
-requires all the dimensions to be evaluated to find the percentage of each dimension vs the chart
-total).
-
-For each dimension, it evaluates values starting at `after` (not inclusive) and moving towards
-`before` (inclusive).
-
-For each value it calls the **grouping method** given with the `&group=` query parameter
-(the default is `average`).
-
-## Grouping methods
-
-The following grouping methods are supported. These are given all the values in the time-frame
-and they group the values every `group points`.
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=min&after=-60&label=min&value_color=blue) finds the minimum value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=average&after=-60&label=average&value_color=yellow) finds the average value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=sum&units=kilobits&after=-60&label=sum&value_color=orange) adds all the values and returns the sum
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=ses&after=-60&label=ses&value_color=brown) finds the exponential weighted moving average of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference of the last vs the first value
-
-The badges above show live information from the `received` traffic on the `eth0` interface of the global Netdata registry.
-Inspect any of the badges to see the parameters provided. You can directly issue the request to the registry server's API yourself, e.g. by
-passing the following to get the value shown on the badge for the sum of the values within the period:
-
-```
-https://registry.my-netdata.io/api/v1/data?chart=net.eth0&options=unaligned&dimensions=received&group=sum&units=kilobits&after=-60&label=sum&points=1
-```
-
-## Further processing
-
-The result of the query engine is always a structure that has dimensions and values
-for each dimension.
-
-Formatting modules are then used to convert this result in many different formats and return it
-to the caller.
-
-## Performance
-
-The query engine is highly optimized for speed. Most of its modules implement "online"
-versions of the algorithms, requiring just one pass on the database values to produce
-the result.
-
-## Example
-
-When Netdata is reducing metrics, it always tries to return the same boundaries. So, if we want 10s averages, it will always return points starting at a `unix timestamp % 10 = 0`.
-
-Let's see why this is needed, by looking at the error case.
-
-Assume we have 5 points:
-
-|time|value|
-|:--:|:---:|
-|00:01|1|
-|00:02|2|
-|00:03|3|
-|00:04|4|
-|00:05|5|
-
-At 00:04 you ask for 2 points for 4 seconds in the past. So `group = 2`. Netdata would return:
-
-|point|time|value|
-|:---:|:--:|:---:|
-|1|00:01 - 00:02|1.5|
-|2|00:03 - 00:04|3.5|
-
-A second later, the chart is refreshed and makes the same request again, at 00:05. These are the points that would have been returned:
-
-|point|time|value|
-|:---:|:--:|:---:|
-|1|00:02 - 00:03|2.5|
-|2|00:04 - 00:05|4.5|
-
-**Wait a moment!** The chart was shifted just one point and it changed value! Point 2 was 3.5 and when shifted to point 1 it is 2.5! If you see this in a chart, it's a mess. The charts change shape constantly.
-
-For this reason, Netdata always aligns the data it returns to the `group`.
-
-When you request `points=1`, Netdata understands that you need 1 point for the whole database, so `group = 3600`. Then it tries to find the starting point, which would be `timestamp % 3600 = 0`. Within a database of 3600 seconds, there is one such point for sure. Then it tries to find the average of 3600 points. But, most probably, it will not find 3600 of them, so for just 1 out of every 3600 seconds this query will return something.
-
-So, the proper way to query the database is to also set at least `after`. The following call will return 1 point for the last complete 10-second duration (it starts at `timestamp % 10 = 0`):
-
-<http://netdata.firehol.org/api/v1/data?chart=system.cpu&points=1&after=-10&options=seconds>
-
-When you keep calling this URL, you will see that it returns one new value every 10 seconds, and the timestamp always ends with zero. Similarly, if you say `points=1&after=-5` it will always return timestamps ending with 0 or 5.
-
-
diff --git a/web/api/queries/average/Makefile.am b/web/api/queries/average/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/average/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/average/README.md b/web/api/queries/average/README.md
deleted file mode 100644
index 3a9c53934..000000000
--- a/web/api/queries/average/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-<!--
-title: "Average or Mean"
-sidebar_label: "Average or Mean"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/average/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Average or Mean
-
-> This query is available as `average` and `mean`.
-
-An average is a single number taken as representative of a list of numbers.
-
-It is calculated as:
-
-```
-average = sum(numbers) / count(numbers)
-```
-
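-For instance, averaging four hypothetical samples:
-
-```
-average = (2 + 4 + 6 + 8) / 4 = 5
-```
-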
-## how to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: average -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`average` does not change the units. For example, if the chart's units are `requests/sec`, the result
-will again be expressed in the same units.
-
-It can also be used in APIs and badges as `&group=average` in the URL.
-
-## Examples
-
-Examining the last 1 minute of `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Average>.
-
-
diff --git a/web/api/queries/countif/Makefile.am b/web/api/queries/countif/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/countif/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/countif/README.md b/web/api/queries/countif/README.md
deleted file mode 100644
index 4004e7a27..000000000
--- a/web/api/queries/countif/README.md
+++ /dev/null
@@ -1,40 +0,0 @@
-<!--
-title: "CountIf"
-sidebar_label: "CountIf"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/countif/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# CountIf
-
-> This query is available as `countif`.
-
-CountIf returns the percentage of points in the database that satisfy the condition supplied.
-
-The following conditions are available:
-
-- `!` or `!=` or `<>`, not equal to
-- `=` or `:`, equal to
-- `>`, greater than
-- `<`, less than
-- `>=`, greater or equal to
-- `<=`, less or equal to
-
-The target number and the desired condition can be set using the `group_options` query parameter, as a string, like in these examples:
-
-- `!0`, to match any number except zero.
-- `>=-3` to match any number greater than or equal to -3.
-
-When an invalid condition is given, the web server may deliver an inaccurate response.
-
-## how to use
-
-This query cannot be used in alerts.
-
-`countif` changes the units of charts. The result of the calculation is always from zero to 1, expressing the fraction of database points that matched the condition.
-
-In APIs and badges, it can be used like this: `&group=countif&group_options=>10` in the URL.
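-
-As a sketch, reusing the registry interface chart from the other query documents, the following asks
-what fraction of the last minute's `received` points exceeded 10:
-
-```bash
-# curl -Ss 'https://registry.my-netdata.io/api/v1/data?chart=net.eth0&dimensions=received&after=-60&points=1&group=countif&group_options=>10'
-```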
-
-
diff --git a/web/api/queries/des/Makefile.am b/web/api/queries/des/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/des/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/des/README.md b/web/api/queries/des/README.md
deleted file mode 100644
index 0cc1a918e..000000000
--- a/web/api/queries/des/README.md
+++ /dev/null
@@ -1,77 +0,0 @@
-<!--
-title: "double exponential smoothing"
-sidebar_label: "double exponential smoothing"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/des/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# double exponential smoothing
-
-Exponential smoothing is one of many window functions commonly applied to smooth data in signal
-processing, acting as low-pass filters to remove high frequency noise.
-
-Simple exponential smoothing does not do well when there is a trend in the data.
-In such situations, several methods were devised under the name "double exponential smoothing"
-or "second-order exponential smoothing": the recursive application of an exponential
-filter twice, which is why it is termed "double".
-
-In simple terms, this is like an average value, but more recent values are given more weight
-and the trend of the values influences significantly the result.
-
-> **IMPORTANT**
->
-> It is common for `des` to provide "average" values that are far beyond the minimum or the maximum
-> values found in the time-series.
-> `des` estimates these values because it takes the trend into account.
-
-This module implements the "Holt-Winters double exponential smoothing".
-
-Netdata automatically adjusts the weight (`alpha`) and the trend (`beta`) based on the number
-of values processed, using the formula:
-
-```
-window = max(number of values, 15)
-alpha = 2 / (window + 1)
-beta = 2 / (window + 1)
-```
-
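-As a worked example of the formula above, a query that groups 15 values per point uses:
-
-```
-window = max(15, 15) = 15
-alpha  = 2 / (15 + 1) = 0.125
-beta   = 2 / (15 + 1) = 0.125
-```
-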
-You can change the fixed value `15` by setting the following in `netdata.conf`:
-
-```
-[web]
- des max window = 15
-```
-
-## how to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: des -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`des` does not change the units. For example, if the chart's units are `requests/sec`, the result
-will again be expressed in the same units.
-
-It can also be used in APIs and badges as `&group=des` in the URL.
-
-## Examples
-
-Examining the last 1 minute of `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=double+exponential+smoothing&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Exponential_smoothing>.
-
-
diff --git a/web/api/queries/incremental_sum/Makefile.am b/web/api/queries/incremental_sum/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/incremental_sum/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/incremental_sum/README.md b/web/api/queries/incremental_sum/README.md
deleted file mode 100644
index c882acba9..000000000
--- a/web/api/queries/incremental_sum/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-<!--
-title: "Incremental Sum (`incremental_sum`)"
-sidebar_label: "Incremental Sum (`incremental_sum`)"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/incremental_sum/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Incremental Sum (`incremental_sum`)
-
-This module finds the incremental sum of a period, which is `last value - first value`.
-
-The result may be positive (rising) or negative (falling) depending on the first and last values.
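-
-As a rough illustration (a minimal C sketch, not the actual module code; the
-function name is hypothetical):
-
-```
-#include <stddef.h>
-
-// Hypothetical sketch: the incremental sum of a period is simply
-// the last value minus the first value.
-static double incremental_sum_sketch(const double *values, size_t n) {
-    return (n >= 2) ? values[n - 1] - values[0] : 0.0;
-}
-```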
-
-## how to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: incremental_sum -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`incremental_sum` does not change the units. For example, if the chart's units are `requests/sec`,
-the result will be expressed in the same units.
-
-It can also be used in APIs and badges as `&group=incremental_sum` in the URL.
-
-## Examples
-
-Examining last 1 minute `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental+sum&value_color=orange)
-
-## References
-
-- none
-
-
diff --git a/web/api/queries/max/Makefile.am b/web/api/queries/max/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/max/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/max/README.md b/web/api/queries/max/README.md
deleted file mode 100644
index e7ad5446d..000000000
--- a/web/api/queries/max/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-<!--
-title: "Max"
-sidebar_label: "Max"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/max/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Max
-
-This module finds the maximum value in the given time-frame.
-
-## how to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: max -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`max` does not change the units. For example, if the chart's units are `requests/sec`,
-the result will be expressed in the same units.
-
-It can also be used in APIs and badges as `&group=max` in the URL.
-
-## Examples
-
-Examining last 1 minute `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=orange)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
-
-
diff --git a/web/api/queries/median/Makefile.am b/web/api/queries/median/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/median/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md
deleted file mode 100644
index f1fb3a61c..000000000
--- a/web/api/queries/median/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-<!--
-title: "Median"
-sidebar_label: "Median"
-description: "Use median in API queries and health entities to find the 'middle' value from a sample, eliminating any unwanted spikes in the returned metrics."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/median/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Median
-
-The median is the value separating the higher half from the lower half of a data sample
-(a population or a probability distribution). For a data set, it may be thought of as the
-"middle" value.
-
-`median` is not an accurate average. However, it eliminates all spikes by sorting
-all the values in a period and selecting the value in the middle of the sorted array.
-
-Netdata also supports `trimmed-median`, which trims a percentage of the smallest and largest
-values prior to finding the median. The following `trimmed-median` functions are defined:
-
-- `trimmed-median1`
-- `trimmed-median2`
-- `trimmed-median3`
-- `trimmed-median5`
-- `trimmed-median10`
-- `trimmed-median15`
-- `trimmed-median20`
-- `trimmed-median25`
-
-The function `trimmed-median` is an alias for `trimmed-median5`.
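-
-As a rough illustration of the trimming step (a minimal C sketch assuming an
-already sorted array; not the actual module code, and the function name is
-hypothetical):
-
-```
-#include <stddef.h>
-
-// Hypothetical sketch: median after trimming `percent`% of the values
-// from each end of a sorted array.
-static double trimmed_median_sketch(const double *sorted, size_t n, double percent) {
-    size_t trim = (size_t)((double)n * percent / 100.0);
-    if (n <= 2 * trim)
-        return 0.0;                      // nothing left after trimming
-
-    const double *v = sorted + trim;     // skip the smallest values
-    size_t m = n - 2 * trim;             // values remaining
-
-    return (m % 2) ? v[m / 2] : (v[m / 2 - 1] + v[m / 2]) / 2.0;
-}
-```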
-
-## how to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: median -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`median` does not change the units. For example, if the chart's units are `requests/sec`,
-the result will be expressed in the same units.
-
-It can also be used in APIs and badges as `&group=median` in the URL. Additionally, a percentage may be given with
-`&group_options=` to trim the smallest and largest values before finding the median.
-
-## Examples
-
-Examining last 1 minute `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Median>.
-
-
diff --git a/web/api/queries/min/Makefile.am b/web/api/queries/min/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/min/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/min/README.md b/web/api/queries/min/README.md
deleted file mode 100644
index 67f3326ed..000000000
--- a/web/api/queries/min/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-<!--
-title: "Min"
-sidebar_label: "Min"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/min/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Min
-
-This module finds the minimum value in the given time-frame.
-
-## how to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: min -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`min` does not change the units. For example, if the chart's units are `requests/sec`,
-the result will be expressed in the same units.
-
-It can also be used in APIs and badges as `&group=min` in the URL.
-
-## Examples
-
-Examining last 1 minute `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Sample_maximum_and_minimum>.
-
-
diff --git a/web/api/queries/percentile/Makefile.am b/web/api/queries/percentile/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/percentile/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/percentile/README.md b/web/api/queries/percentile/README.md
deleted file mode 100644
index e0d21ee76..000000000
--- a/web/api/queries/percentile/README.md
+++ /dev/null
@@ -1,62 +0,0 @@
-<!--
-title: "Percentile"
-sidebar_label: "Percentile"
-description: "Use percentile in API queries and health entities to find the 'percentile' value from a sample, eliminating any unwanted spikes in the returned metrics."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/percentile/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Percentile
-
-The percentile is the average value of a series, using only the smallest N percent of the values.
-
-Netdata applies linear interpolation on the last point if the requested percentile does not
-give a round number of points.
-
-The following percentile aliases are defined:
-
-- `percentile25`
-- `percentile50`
-- `percentile75`
-- `percentile80`
-- `percentile90`
-- `percentile95`
-- `percentile97`
-- `percentile98`
-- `percentile99`
-
-The default `percentile` is an alias for `percentile95`.
-Any percentile may be requested using the `group_options` query parameter.
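-
-As a rough illustration (a minimal C sketch assuming a sorted array; not the
-actual module code, and the function name is hypothetical):
-
-```
-#include <math.h>
-#include <stddef.h>
-
-// Hypothetical sketch: average the smallest `percent`% of a sorted array,
-// linearly interpolating the weight of the last (partial) point.
-static double percentile_sketch(const double *sorted, size_t n, double percent) {
-    if (!n) return 0.0;
-
-    double wanted  = (double)n * percent / 100.0; // points to include
-    size_t full    = (size_t)floor(wanted);       // fully included points
-    double partial = wanted - (double)full;       // weight of the next point
-
-    double sum = 0.0;
-    for (size_t i = 0; i < full; i++)
-        sum += sorted[i];
-
-    if (partial > 0.0 && full < n)
-        sum += sorted[full] * partial;
-
-    return wanted > 0.0 ? sum / wanted : sorted[0];
-}
-```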
-
-## how to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: percentile95 -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`percentile` does not change the units. For example, if the chart's units are `requests/sec`,
-the result will be expressed in the same units.
-
-It can also be used in APIs and badges as `&group=percentile` in the URL and the additional parameter `group_options`
-may be used to request any percentile (e.g. `&group=percentile&group_options=96`).
-
-## Examples
-
-Examining last 1 minute `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=percentile95&after=-60&label=percentile95&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Percentile>.
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
deleted file mode 100644
index 76d673cae..000000000
--- a/web/api/queries/query.c
+++ /dev/null
@@ -1,3713 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "query.h"
-#include "web/api/formatters/rrd2json.h"
-#include "rrdr.h"
-
-#include "average/average.h"
-#include "countif/countif.h"
-#include "incremental_sum/incremental_sum.h"
-#include "max/max.h"
-#include "median/median.h"
-#include "min/min.h"
-#include "sum/sum.h"
-#include "stddev/stddev.h"
-#include "ses/ses.h"
-#include "des/des.h"
-#include "percentile/percentile.h"
-#include "trimmed_mean/trimmed_mean.h"
-
-#define QUERY_PLAN_MIN_POINTS 10
-#define POINTS_TO_EXPAND_QUERY 5
-
-// ----------------------------------------------------------------------------
-
-static struct {
- const char *name;
- uint32_t hash;
- RRDR_TIME_GROUPING value;
- RRDR_TIME_GROUPING add_flush;
-
- // One time initialization for the module.
- // This is called once, when netdata starts.
- void (*init)(void);
-
- // Allocate all required structures for a query.
- // This is called once for each netdata query.
- void (*create)(struct rrdresult *r, const char *options);
-
- // Cleanup collected values, but don't destroy the structures.
- // This is called when the query engine switches dimensions,
- // as part of the same query (so same chart, switching metric).
- void (*reset)(struct rrdresult *r);
-
- // Free all resources allocated for the query.
- void (*free)(struct rrdresult *r);
-
- // Add a single value into the calculation.
- // The module may decide to cache it, or use it on the fly.
- void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);
-
- // Generate a single result for the values added so far.
- // More values and points may be requested later.
- // It is up to the module to reset its internal structures
- // when flushing it (so for a few modules it may be better to
- // continue after a flush as if nothing changed, for others a
- // cleanup of the internal structures may be required).
- NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-
- TIER_QUERY_FETCH tier_query_fetch;
-} api_v1_data_groups[] = {
- {.name = "average",
- .hash = 0,
- .value = RRDR_GROUPING_AVERAGE,
- .add_flush = RRDR_GROUPING_AVERAGE,
- .init = NULL,
- .create= tg_average_create,
- .reset = tg_average_reset,
- .free = tg_average_free,
- .add = tg_average_add,
- .flush = tg_average_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "avg", // alias of 'average'
- .hash = 0,
- .value = RRDR_GROUPING_AVERAGE,
- .add_flush = RRDR_GROUPING_AVERAGE,
- .init = NULL,
- .create= tg_average_create,
- .reset = tg_average_reset,
- .free = tg_average_free,
- .add = tg_average_add,
- .flush = tg_average_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "mean", // alias of 'average'
- .hash = 0,
- .value = RRDR_GROUPING_AVERAGE,
- .add_flush = RRDR_GROUPING_AVERAGE,
- .init = NULL,
- .create= tg_average_create,
- .reset = tg_average_reset,
- .free = tg_average_free,
- .add = tg_average_add,
- .flush = tg_average_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean1",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN1,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_1,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean2",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN2,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_2,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean3",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN3,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_3,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean5",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_5,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean10",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN10,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_10,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean15",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN15,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_15,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean20",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN20,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_20,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean25",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN25,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_25,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-mean",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN,
- .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
- .init = NULL,
- .create= tg_trimmed_mean_create_5,
- .reset = tg_trimmed_mean_reset,
- .free = tg_trimmed_mean_free,
- .add = tg_trimmed_mean_add,
- .flush = tg_trimmed_mean_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "incremental_sum",
- .hash = 0,
- .value = RRDR_GROUPING_INCREMENTAL_SUM,
- .add_flush = RRDR_GROUPING_INCREMENTAL_SUM,
- .init = NULL,
- .create= tg_incremental_sum_create,
- .reset = tg_incremental_sum_reset,
- .free = tg_incremental_sum_free,
- .add = tg_incremental_sum_add,
- .flush = tg_incremental_sum_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "incremental-sum",
- .hash = 0,
- .value = RRDR_GROUPING_INCREMENTAL_SUM,
- .add_flush = RRDR_GROUPING_INCREMENTAL_SUM,
- .init = NULL,
- .create= tg_incremental_sum_create,
- .reset = tg_incremental_sum_reset,
- .free = tg_incremental_sum_free,
- .add = tg_incremental_sum_add,
- .flush = tg_incremental_sum_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "median",
- .hash = 0,
- .value = RRDR_GROUPING_MEDIAN,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median1",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN1,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_1,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median2",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN2,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_2,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median3",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN3,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_3,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median5",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_5,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median10",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN10,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_10,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median15",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN15,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_15,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median20",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN20,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_20,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median25",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN25,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_25,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "trimmed-median",
- .hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
- .add_flush = RRDR_GROUPING_MEDIAN,
- .init = NULL,
- .create= tg_median_create_trimmed_5,
- .reset = tg_median_reset,
- .free = tg_median_free,
- .add = tg_median_add,
- .flush = tg_median_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile25",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE25,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_25,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile50",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE50,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_50,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile75",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE75,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_75,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile80",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE80,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_80,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile90",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE90,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_90,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile95",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_95,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile97",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE97,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_97,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile98",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE98,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_98,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile99",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE99,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_99,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "percentile",
- .hash = 0,
- .value = RRDR_GROUPING_PERCENTILE,
- .add_flush = RRDR_GROUPING_PERCENTILE,
- .init = NULL,
- .create= tg_percentile_create_95,
- .reset = tg_percentile_reset,
- .free = tg_percentile_free,
- .add = tg_percentile_add,
- .flush = tg_percentile_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "min",
- .hash = 0,
- .value = RRDR_GROUPING_MIN,
- .add_flush = RRDR_GROUPING_MIN,
- .init = NULL,
- .create= tg_min_create,
- .reset = tg_min_reset,
- .free = tg_min_free,
- .add = tg_min_add,
- .flush = tg_min_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_MIN
- },
- {.name = "max",
- .hash = 0,
- .value = RRDR_GROUPING_MAX,
- .add_flush = RRDR_GROUPING_MAX,
- .init = NULL,
- .create= tg_max_create,
- .reset = tg_max_reset,
- .free = tg_max_free,
- .add = tg_max_add,
- .flush = tg_max_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_MAX
- },
- {.name = "sum",
- .hash = 0,
- .value = RRDR_GROUPING_SUM,
- .add_flush = RRDR_GROUPING_SUM,
- .init = NULL,
- .create= tg_sum_create,
- .reset = tg_sum_reset,
- .free = tg_sum_free,
- .add = tg_sum_add,
- .flush = tg_sum_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_SUM
- },
-
- // standard deviation
- {.name = "stddev",
- .hash = 0,
- .value = RRDR_GROUPING_STDDEV,
- .add_flush = RRDR_GROUPING_STDDEV,
- .init = NULL,
- .create= tg_stddev_create,
- .reset = tg_stddev_reset,
- .free = tg_stddev_free,
- .add = tg_stddev_add,
- .flush = tg_stddev_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "cv", // coefficient of variation is calculated by stddev
- .hash = 0,
- .value = RRDR_GROUPING_CV,
- .add_flush = RRDR_GROUPING_CV,
- .init = NULL,
- .create= tg_stddev_create, // not an error, stddev calculates this too
- .reset = tg_stddev_reset, // not an error, stddev calculates this too
- .free = tg_stddev_free, // not an error, stddev calculates this too
- .add = tg_stddev_add, // not an error, stddev calculates this too
- .flush = tg_stddev_coefficient_of_variation_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "rsd", // alias of 'cv'
- .hash = 0,
- .value = RRDR_GROUPING_CV,
- .add_flush = RRDR_GROUPING_CV,
- .init = NULL,
- .create= tg_stddev_create, // not an error, stddev calculates this too
- .reset = tg_stddev_reset, // not an error, stddev calculates this too
- .free = tg_stddev_free, // not an error, stddev calculates this too
- .add = tg_stddev_add, // not an error, stddev calculates this too
- .flush = tg_stddev_coefficient_of_variation_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
-
- // single exponential smoothing
- {.name = "ses",
- .hash = 0,
- .value = RRDR_GROUPING_SES,
- .add_flush = RRDR_GROUPING_SES,
- .init = tg_ses_init,
- .create= tg_ses_create,
- .reset = tg_ses_reset,
- .free = tg_ses_free,
- .add = tg_ses_add,
- .flush = tg_ses_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "ema", // alias for 'ses'
- .hash = 0,
- .value = RRDR_GROUPING_SES,
- .add_flush = RRDR_GROUPING_SES,
- .init = NULL,
- .create= tg_ses_create,
- .reset = tg_ses_reset,
- .free = tg_ses_free,
- .add = tg_ses_add,
- .flush = tg_ses_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- {.name = "ewma", // alias for ses
- .hash = 0,
- .value = RRDR_GROUPING_SES,
- .add_flush = RRDR_GROUPING_SES,
- .init = NULL,
- .create= tg_ses_create,
- .reset = tg_ses_reset,
- .free = tg_ses_free,
- .add = tg_ses_add,
- .flush = tg_ses_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
-
- // double exponential smoothing
- {.name = "des",
- .hash = 0,
- .value = RRDR_GROUPING_DES,
- .add_flush = RRDR_GROUPING_DES,
- .init = tg_des_init,
- .create= tg_des_create,
- .reset = tg_des_reset,
- .free = tg_des_free,
- .add = tg_des_add,
- .flush = tg_des_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
-
- {.name = "countif",
- .hash = 0,
- .value = RRDR_GROUPING_COUNTIF,
- .add_flush = RRDR_GROUPING_COUNTIF,
- .init = NULL,
- .create= tg_countif_create,
- .reset = tg_countif_reset,
- .free = tg_countif_free,
- .add = tg_countif_add,
- .flush = tg_countif_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
-
- // terminator
- {.name = NULL,
- .hash = 0,
- .value = RRDR_GROUPING_UNDEFINED,
- .add_flush = RRDR_GROUPING_AVERAGE,
- .init = NULL,
- .create= tg_average_create,
- .reset = tg_average_reset,
- .free = tg_average_free,
- .add = tg_average_add,
- .flush = tg_average_flush,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- }
-};
-
-void time_grouping_init(void) {
- int i;
-
- for(i = 0; api_v1_data_groups[i].name ; i++) {
- api_v1_data_groups[i].hash = simple_hash(api_v1_data_groups[i].name);
-
- if(api_v1_data_groups[i].init)
- api_v1_data_groups[i].init();
- }
-}
-
-const char *time_grouping_method2string(RRDR_TIME_GROUPING group) {
- int i;
-
- for(i = 0; api_v1_data_groups[i].name ; i++) {
- if(api_v1_data_groups[i].value == group) {
- return api_v1_data_groups[i].name;
- }
- }
-
- return "unknown-group-method";
-}
-
-RRDR_TIME_GROUPING time_grouping_parse(const char *name, RRDR_TIME_GROUPING def) {
- int i;
-
- uint32_t hash = simple_hash(name);
- for(i = 0; api_v1_data_groups[i].name ; i++)
- if(unlikely(hash == api_v1_data_groups[i].hash && !strcmp(name, api_v1_data_groups[i].name)))
- return api_v1_data_groups[i].value;
-
- return def;
-}
-
-const char *time_grouping_tostring(RRDR_TIME_GROUPING group) {
- int i;
-
- for(i = 0; api_v1_data_groups[i].name ; i++)
- if(unlikely(group == api_v1_data_groups[i].value))
- return api_v1_data_groups[i].name;
-
- return "unknown";
-}
-
-static void rrdr_set_grouping_function(RRDR *r, RRDR_TIME_GROUPING group_method) {
- int i, found = 0;
- for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
- if(api_v1_data_groups[i].value == group_method) {
- r->time_grouping.create = api_v1_data_groups[i].create;
- r->time_grouping.reset = api_v1_data_groups[i].reset;
- r->time_grouping.free = api_v1_data_groups[i].free;
- r->time_grouping.add = api_v1_data_groups[i].add;
- r->time_grouping.flush = api_v1_data_groups[i].flush;
- r->time_grouping.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
- r->time_grouping.add_flush = api_v1_data_groups[i].add_flush;
- found = 1;
- }
- }
- if(!found) {
- errno = 0;
- internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
- r->time_grouping.create = tg_average_create;
- r->time_grouping.reset = tg_average_reset;
- r->time_grouping.free = tg_average_free;
- r->time_grouping.add = tg_average_add;
- r->time_grouping.flush = tg_average_flush;
- r->time_grouping.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
- r->time_grouping.add_flush = RRDR_GROUPING_AVERAGE;
- }
-}
-
-static inline void time_grouping_add(RRDR *r, NETDATA_DOUBLE value, const RRDR_TIME_GROUPING add_flush) {
- switch(add_flush) {
- case RRDR_GROUPING_AVERAGE:
- tg_average_add(r, value);
- break;
-
- case RRDR_GROUPING_MAX:
- tg_max_add(r, value);
- break;
-
- case RRDR_GROUPING_MIN:
- tg_min_add(r, value);
- break;
-
- case RRDR_GROUPING_MEDIAN:
- tg_median_add(r, value);
- break;
-
- case RRDR_GROUPING_STDDEV:
- case RRDR_GROUPING_CV:
- tg_stddev_add(r, value);
- break;
-
- case RRDR_GROUPING_SUM:
- tg_sum_add(r, value);
- break;
-
- case RRDR_GROUPING_COUNTIF:
- tg_countif_add(r, value);
- break;
-
- case RRDR_GROUPING_TRIMMED_MEAN:
- tg_trimmed_mean_add(r, value);
- break;
-
- case RRDR_GROUPING_PERCENTILE:
- tg_percentile_add(r, value);
- break;
-
- case RRDR_GROUPING_SES:
- tg_ses_add(r, value);
- break;
-
- case RRDR_GROUPING_DES:
- tg_des_add(r, value);
- break;
-
- case RRDR_GROUPING_INCREMENTAL_SUM:
- tg_incremental_sum_add(r, value);
- break;
-
- default:
- r->time_grouping.add(r, value);
- break;
- }
-}
-
-static inline NETDATA_DOUBLE time_grouping_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr, const RRDR_TIME_GROUPING add_flush) {
- switch(add_flush) {
- case RRDR_GROUPING_AVERAGE:
- return tg_average_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_MAX:
- return tg_max_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_MIN:
- return tg_min_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_MEDIAN:
- return tg_median_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_STDDEV:
- return tg_stddev_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_CV:
- return tg_stddev_coefficient_of_variation_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_SUM:
- return tg_sum_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_COUNTIF:
- return tg_countif_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_TRIMMED_MEAN:
- return tg_trimmed_mean_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_PERCENTILE:
- return tg_percentile_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_SES:
- return tg_ses_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_DES:
- return tg_des_flush(r, rrdr_value_options_ptr);
-
- case RRDR_GROUPING_INCREMENTAL_SUM:
- return tg_incremental_sum_flush(r, rrdr_value_options_ptr);
-
- default:
- return r->time_grouping.flush(r, rrdr_value_options_ptr);
- }
-}
-
-RRDR_GROUP_BY group_by_parse(char *s) {
- RRDR_GROUP_BY group_by = RRDR_GROUP_BY_NONE;
-
- while(s) {
- char *key = strsep_skip_consecutive_separators(&s, ",| ");
- if (!key || !*key) continue;
-
- if (strcmp(key, "selected") == 0)
- group_by |= RRDR_GROUP_BY_SELECTED;
-
- if (strcmp(key, "dimension") == 0)
- group_by |= RRDR_GROUP_BY_DIMENSION;
-
- if (strcmp(key, "instance") == 0)
- group_by |= RRDR_GROUP_BY_INSTANCE;
-
- if (strcmp(key, "percentage-of-instance") == 0)
- group_by |= RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE;
-
- if (strcmp(key, "label") == 0)
- group_by |= RRDR_GROUP_BY_LABEL;
-
- if (strcmp(key, "node") == 0)
- group_by |= RRDR_GROUP_BY_NODE;
-
- if (strcmp(key, "context") == 0)
- group_by |= RRDR_GROUP_BY_CONTEXT;
-
- if (strcmp(key, "units") == 0)
- group_by |= RRDR_GROUP_BY_UNITS;
- }
-
- if((group_by & RRDR_GROUP_BY_SELECTED) && (group_by & ~RRDR_GROUP_BY_SELECTED)) {
- internal_error(true, "group-by given by query has 'selected' together with more groupings");
- group_by = RRDR_GROUP_BY_SELECTED; // remove all other groupings
- }
-
- if(group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
- group_by = RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE; // remove all other groupings
-
- return group_by;
-}
-
-void buffer_json_group_by_to_array(BUFFER *wb, RRDR_GROUP_BY group_by) {
- if(group_by == RRDR_GROUP_BY_NONE)
- buffer_json_add_array_item_string(wb, "none");
- else {
- if (group_by & RRDR_GROUP_BY_DIMENSION)
- buffer_json_add_array_item_string(wb, "dimension");
-
- if (group_by & RRDR_GROUP_BY_INSTANCE)
- buffer_json_add_array_item_string(wb, "instance");
-
- if (group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
- buffer_json_add_array_item_string(wb, "percentage-of-instance");
-
- if (group_by & RRDR_GROUP_BY_LABEL)
- buffer_json_add_array_item_string(wb, "label");
-
- if (group_by & RRDR_GROUP_BY_NODE)
- buffer_json_add_array_item_string(wb, "node");
-
- if (group_by & RRDR_GROUP_BY_CONTEXT)
- buffer_json_add_array_item_string(wb, "context");
-
- if (group_by & RRDR_GROUP_BY_UNITS)
- buffer_json_add_array_item_string(wb, "units");
-
- if (group_by & RRDR_GROUP_BY_SELECTED)
- buffer_json_add_array_item_string(wb, "selected");
- }
-}
-
-RRDR_GROUP_BY_FUNCTION group_by_aggregate_function_parse(const char *s) {
- if(strcmp(s, "average") == 0)
- return RRDR_GROUP_BY_FUNCTION_AVERAGE;
-
- if(strcmp(s, "avg") == 0)
- return RRDR_GROUP_BY_FUNCTION_AVERAGE;
-
- if(strcmp(s, "min") == 0)
- return RRDR_GROUP_BY_FUNCTION_MIN;
-
- if(strcmp(s, "max") == 0)
- return RRDR_GROUP_BY_FUNCTION_MAX;
-
- if(strcmp(s, "sum") == 0)
- return RRDR_GROUP_BY_FUNCTION_SUM;
-
- if(strcmp(s, "percentage") == 0)
- return RRDR_GROUP_BY_FUNCTION_PERCENTAGE;
-
- return RRDR_GROUP_BY_FUNCTION_AVERAGE;
-}
-
-const char *group_by_aggregate_function_to_string(RRDR_GROUP_BY_FUNCTION group_by_function) {
- switch(group_by_function) {
- default:
- case RRDR_GROUP_BY_FUNCTION_AVERAGE:
- return "average";
-
- case RRDR_GROUP_BY_FUNCTION_MIN:
- return "min";
-
- case RRDR_GROUP_BY_FUNCTION_MAX:
- return "max";
-
- case RRDR_GROUP_BY_FUNCTION_SUM:
- return "sum";
-
- case RRDR_GROUP_BY_FUNCTION_PERCENTAGE:
- return "percentage";
- }
-}
-
-// ----------------------------------------------------------------------------
-// helpers to find our way in RRDR
-
-static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long rrdr_line) {
- return &r->o[ rrdr_line * r->d ];
-}
-
-static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
- return &r->v[ rrdr_line * r->d ];
-}
-
-static inline long rrdr_line_init(RRDR *r __maybe_unused, time_t t __maybe_unused, long rrdr_line) {
- rrdr_line++;
-
- internal_fatal(rrdr_line >= (long)r->n,
- "QUERY: requested to step above RRDR size for query '%s'",
- r->internal.qt->id);
-
- internal_fatal(r->t[rrdr_line] != t,
- "QUERY: wrong timestamp at RRDR line %ld, expected %ld, got %ld, of query '%s'",
- rrdr_line, r->t[rrdr_line], t, r->internal.qt->id);
-
- return rrdr_line;
-}
-
-// ----------------------------------------------------------------------------
-// tier management
-
-static bool query_metric_is_valid_tier(QUERY_METRIC *qm, size_t tier) {
- if(!qm->tiers[tier].db_metric_handle || !qm->tiers[tier].db_first_time_s || !qm->tiers[tier].db_last_time_s || !qm->tiers[tier].db_update_every_s)
- return false;
-
- return true;
-}
-
-static size_t query_metric_first_working_tier(QUERY_METRIC *qm) {
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
-
- // find the db time-range for this tier for all metrics
- STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
- time_t update_every_s = qm->tiers[tier].db_update_every_s;
-
- if(!db_metric_handle || !first_time_s || !last_time_s || !update_every_s)
- continue;
-
- return tier;
- }
-
- return 0;
-}
-
-static long query_plan_points_coverage_weight(time_t db_first_time_s, time_t db_last_time_s, time_t db_update_every_s, time_t after_wanted, time_t before_wanted, size_t points_wanted, size_t tier __maybe_unused) {
- if(db_first_time_s == 0 ||
- db_last_time_s == 0 ||
- db_update_every_s == 0 ||
- db_first_time_s > before_wanted ||
- db_last_time_s < after_wanted)
- return -LONG_MAX;
-
- long long common_first_t = MAX(db_first_time_s, after_wanted);
- long long common_last_t = MIN(db_last_time_s, before_wanted);
-
- long long time_coverage = (common_last_t - common_first_t) * 1000000LL / (before_wanted - after_wanted);
- long long points_wanted_in_coverage = (long long)points_wanted * time_coverage / 1000000LL;
-
- long long points_available = (common_last_t - common_first_t) / db_update_every_s;
- long long points_delta = (long)(points_available - points_wanted_in_coverage);
- long long points_coverage = (points_delta < 0) ? (long)(points_available * time_coverage / points_wanted_in_coverage) : time_coverage;
-
- // a way to benefit higher tiers
- // points_coverage += (long)tier * 10000;
-
- if(points_available <= 0)
- return -LONG_MAX;
-
- return (long)(points_coverage + (25000LL * tier)); // 2.5% benefit for each higher tier
-}
-
-static size_t query_metric_best_tier_for_timeframe(QUERY_METRIC *qm, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
- if(unlikely(storage_tiers < 2))
- return 0;
-
- if(unlikely(after_wanted == before_wanted || points_wanted <= 0))
- return query_metric_first_working_tier(qm);
-
- if(points_wanted < QUERY_PLAN_MIN_POINTS)
- // when selecting tiers, aim for a resolution of at least QUERY_PLAN_MIN_POINTS points
- points_wanted = (before_wanted - after_wanted) > QUERY_PLAN_MIN_POINTS ? QUERY_PLAN_MIN_POINTS : before_wanted - after_wanted;
-
- time_t min_first_time_s = 0;
- time_t max_last_time_s = 0;
-
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
-
- if(!min_first_time_s || (first_time_s && first_time_s < min_first_time_s))
- min_first_time_s = first_time_s;
-
- if(!max_last_time_s || (last_time_s && last_time_s > max_last_time_s))
- max_last_time_s = last_time_s;
- }
-
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
-
- // find the db time-range for this tier for all metrics
- STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
- time_t update_every_s = qm->tiers[tier].db_update_every_s;
-
- if( !db_metric_handle ||
- !first_time_s ||
- !last_time_s ||
- !update_every_s ||
- first_time_s > before_wanted ||
- last_time_s < after_wanted
- ) {
- qm->tiers[tier].weight = -LONG_MAX;
- continue;
- }
-
- internal_fatal(first_time_s > before_wanted || last_time_s < after_wanted, "QUERY: invalid db durations");
-
- qm->tiers[tier].weight = query_plan_points_coverage_weight(
- min_first_time_s, max_last_time_s, update_every_s,
- after_wanted, before_wanted, points_wanted, tier);
- }
-
- size_t best_tier = 0;
- for(size_t tier = 1; tier < storage_tiers ; tier++) {
- if(qm->tiers[tier].weight >= qm->tiers[best_tier].weight)
- best_tier = tier;
- }
-
- return best_tier;
-}
-
-static size_t rrddim_find_best_tier_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
- if(unlikely(storage_tiers < 2))
- return 0;
-
- if(unlikely(after_wanted == before_wanted || points_wanted <= 0)) {
- internal_error(true, "QUERY: '%s' has invalid params to tier calculation", qt->id);
- return 0;
- }
-
- long weight[storage_tiers];
-
- for(size_t tier = 0; tier < storage_tiers ; tier++) {
-
- time_t common_first_time_s = 0;
- time_t common_last_time_s = 0;
- time_t common_update_every_s = 0;
-
- // find the db time-range for this tier for all metrics
- for(size_t i = 0, used = qt->query.used; i < used ; i++) {
- QUERY_METRIC *qm = query_metric(qt, i);
-
- time_t first_time_s = qm->tiers[tier].db_first_time_s;
- time_t last_time_s = qm->tiers[tier].db_last_time_s;
- time_t update_every_s = qm->tiers[tier].db_update_every_s;
-
- if(!first_time_s || !last_time_s || !update_every_s)
- continue;
-
- if(!common_first_time_s)
- common_first_time_s = first_time_s;
- else
- common_first_time_s = MIN(first_time_s, common_first_time_s);
-
- if(!common_last_time_s)
- common_last_time_s = last_time_s;
- else
- common_last_time_s = MAX(last_time_s, common_last_time_s);
-
- if(!common_update_every_s)
- common_update_every_s = update_every_s;
- else
- common_update_every_s = MIN(update_every_s, common_update_every_s);
- }
-
- weight[tier] = query_plan_points_coverage_weight(common_first_time_s, common_last_time_s, common_update_every_s, after_wanted, before_wanted, points_wanted, tier);
- }
-
- size_t best_tier = 0;
- for(size_t tier = 1; tier < storage_tiers ; tier++) {
- if(weight[tier] >= weight[best_tier])
- best_tier = tier;
- }
-
- if(weight[best_tier] == -LONG_MAX)
- best_tier = 0;
-
- return best_tier;
-}
-
-static time_t rrdset_find_natural_update_every_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted, RRDR_OPTIONS options, size_t tier) {
- size_t best_tier;
- if((options & RRDR_OPTION_SELECTED_TIER) && tier < storage_tiers)
- best_tier = tier;
- else
- best_tier = rrddim_find_best_tier_for_timeframe(qt, after_wanted, before_wanted, points_wanted);
-
- // find the db minimum update every for this tier for all metrics
- time_t common_update_every_s = default_rrd_update_every;
- for(size_t i = 0, used = qt->query.used; i < used ; i++) {
- QUERY_METRIC *qm = query_metric(qt, i);
-
- time_t update_every_s = qm->tiers[best_tier].db_update_every_s;
-
- if(!i)
- common_update_every_s = update_every_s;
- else
- common_update_every_s = MIN(update_every_s, common_update_every_s);
- }
-
- return common_update_every_s;
-}
-
-// ----------------------------------------------------------------------------
-// query ops
-
-typedef struct query_point {
- STORAGE_POINT sp;
- NETDATA_DOUBLE value;
- bool added;
-#ifdef NETDATA_INTERNAL_CHECKS
- size_t id;
-#endif
-} QUERY_POINT;
-
-QUERY_POINT QUERY_POINT_EMPTY = {
- .sp = STORAGE_POINT_UNSET,
- .value = NAN,
- .added = false,
-#ifdef NETDATA_INTERNAL_CHECKS
- .id = 0,
-#endif
-};
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define query_point_set_id(point, point_id) (point).id = point_id
-#else
-#define query_point_set_id(point, point_id) debug_dummy()
-#endif
-
-typedef struct query_engine_ops {
- // configuration
- RRDR *r;
- QUERY_METRIC *qm;
- time_t view_update_every;
- time_t query_granularity;
- TIER_QUERY_FETCH tier_query_fetch;
-
- // query planer
- size_t current_plan;
- time_t current_plan_expire_time;
- time_t plan_expanded_after;
- time_t plan_expanded_before;
-
- // storage queries
- size_t tier;
- struct query_metric_tier *tier_ptr;
- struct storage_engine_query_handle *handle;
-
- // aggregating points over time
- size_t group_points_non_zero;
- size_t group_points_added;
- STORAGE_POINT group_point; // aggregates min, max, sum, count, anomaly count for each group point
- STORAGE_POINT query_point; // aggregates min, max, sum, count, anomaly count across the whole query
- RRDR_VALUE_FLAGS group_value_flags;
-
- // statistics
- size_t db_total_points_read;
- size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
-
- struct {
- time_t expanded_after;
- time_t expanded_before;
- struct storage_engine_query_handle handle;
- bool initialized;
- bool finalized;
- } plans[QUERY_PLANS_MAX];
-
- struct query_engine_ops *next;
-} QUERY_ENGINE_OPS;
-
-
-// ----------------------------------------------------------------------------
-// query planer
-
-#define query_plan_should_switch_plan(ops, now) ((now) >= (ops)->current_plan_expire_time)
-
-static size_t query_planer_expand_duration_in_points(time_t this_update_every, time_t next_update_every) {
-
- time_t delta = this_update_every - next_update_every;
- if(delta < 0) delta = -delta;
-
- size_t points;
- if(delta < this_update_every * POINTS_TO_EXPAND_QUERY)
- points = POINTS_TO_EXPAND_QUERY;
- else
- points = (delta + this_update_every - 1) / this_update_every;
-
- return points;
-}
-
-static void query_planer_initialize_plans(QUERY_ENGINE_OPS *ops) {
- QUERY_METRIC *qm = ops->qm;
-
- for(size_t p = 0; p < qm->plan.used ; p++) {
- size_t tier = qm->plan.array[p].tier;
- time_t update_every = qm->tiers[tier].db_update_every_s;
-
- size_t points_to_add_to_after;
- if(p > 0) {
- // there is another plan before this one
-
- size_t tier0 = qm->plan.array[p - 1].tier;
- time_t update_every0 = qm->tiers[tier0].db_update_every_s;
-
- points_to_add_to_after = query_planer_expand_duration_in_points(update_every, update_every0);
- }
- else
- points_to_add_to_after = (tier == 0) ? 0 : POINTS_TO_EXPAND_QUERY;
-
- size_t points_to_add_to_before;
- if(p + 1 < qm->plan.used) {
- // there is another plan after this one
-
- size_t tier1 = qm->plan.array[p+1].tier;
- time_t update_every1 = qm->tiers[tier1].db_update_every_s;
-
- points_to_add_to_before = query_planer_expand_duration_in_points(update_every, update_every1);
- }
- else
- points_to_add_to_before = POINTS_TO_EXPAND_QUERY;
-
- time_t after = qm->plan.array[p].after - (time_t)(update_every * points_to_add_to_after);
- time_t before = qm->plan.array[p].before + (time_t)(update_every * points_to_add_to_before);
-
- ops->plans[p].expanded_after = after;
- ops->plans[p].expanded_before = before;
-
- ops->r->internal.qt->db.tiers[tier].queries++;
-
- struct query_metric_tier *tier_ptr = &qm->tiers[tier];
- STORAGE_ENGINE *eng = query_metric_storage_engine(ops->r->internal.qt, qm, tier);
- storage_engine_query_init(eng->backend, tier_ptr->db_metric_handle, &ops->plans[p].handle,
- after, before, ops->r->internal.qt->request.priority);
-
- ops->plans[p].initialized = true;
- ops->plans[p].finalized = false;
- }
-}
-
-static void query_planer_finalize_plan(QUERY_ENGINE_OPS *ops, size_t plan_id) {
- // QUERY_METRIC *qm = ops->qm;
-
- if(ops->plans[plan_id].initialized && !ops->plans[plan_id].finalized) {
- storage_engine_query_finalize(&ops->plans[plan_id].handle);
- ops->plans[plan_id].initialized = false;
- ops->plans[plan_id].finalized = true;
- }
-}
-
-static void query_planer_finalize_remaining_plans(QUERY_ENGINE_OPS *ops) {
- QUERY_METRIC *qm = ops->qm;
-
- for(size_t p = 0; p < qm->plan.used ; p++)
- query_planer_finalize_plan(ops, p);
-}
-
-static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, time_t overwrite_after __maybe_unused) {
- QUERY_METRIC *qm = ops->qm;
-
- internal_fatal(plan_id >= qm->plan.used, "QUERY: invalid plan_id given");
- internal_fatal(!ops->plans[plan_id].initialized, "QUERY: plan has not been initialized");
- internal_fatal(ops->plans[plan_id].finalized, "QUERY: plan has been finalized");
-
- internal_fatal(qm->plan.array[plan_id].after > qm->plan.array[plan_id].before, "QUERY: flipped after/before");
-
- ops->tier = qm->plan.array[plan_id].tier;
- ops->tier_ptr = &qm->tiers[ops->tier];
- ops->handle = &ops->plans[plan_id].handle;
- ops->current_plan = plan_id;
-
- if(plan_id + 1 < qm->plan.used && qm->plan.array[plan_id + 1].after < qm->plan.array[plan_id].before)
- ops->current_plan_expire_time = qm->plan.array[plan_id + 1].after;
- else
- ops->current_plan_expire_time = qm->plan.array[plan_id].before;
-
- ops->plan_expanded_after = ops->plans[plan_id].expanded_after;
- ops->plan_expanded_before = ops->plans[plan_id].expanded_before;
-}
-
-static bool query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
- QUERY_METRIC *qm = ops->qm;
-
- size_t old_plan = ops->current_plan;
-
- time_t next_plan_before_time;
- do {
- ops->current_plan++;
-
- if (ops->current_plan >= qm->plan.used) {
- ops->current_plan = old_plan;
- ops->current_plan_expire_time = ops->r->internal.qt->window.before;
- // let the query run with current plan
- // we will not switch it
- return false;
- }
-
- next_plan_before_time = qm->plan.array[ops->current_plan].before;
- } while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);
-
- if(!query_metric_is_valid_tier(qm, qm->plan.array[ops->current_plan].tier)) {
- ops->current_plan = old_plan;
- ops->current_plan_expire_time = ops->r->internal.qt->window.before;
- return false;
- }
-
- query_planer_finalize_plan(ops, old_plan);
- query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));
- return true;
-}
-
-static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
- QUERY_PLAN_ENTRY *p1 = (QUERY_PLAN_ENTRY *)a;
- QUERY_PLAN_ENTRY *p2 = (QUERY_PLAN_ENTRY *)b;
- return (p1->after < p2->after)?-1:1;
-}
-
-static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
- QUERY_METRIC *qm = ops->qm;
-
- // put our selected tier as the first plan
- size_t selected_tier;
- bool switch_tiers = true;
-
- if((ops->r->internal.qt->window.options & RRDR_OPTION_SELECTED_TIER)
- && ops->r->internal.qt->window.tier < storage_tiers
- && query_metric_is_valid_tier(qm, ops->r->internal.qt->window.tier)) {
- selected_tier = ops->r->internal.qt->window.tier;
- switch_tiers = false;
- }
- else {
- selected_tier = query_metric_best_tier_for_timeframe(qm, after_wanted, before_wanted, points_wanted);
-
- if(!query_metric_is_valid_tier(qm, selected_tier))
- return false;
- }
-
- if(qm->tiers[selected_tier].db_first_time_s > before_wanted ||
- qm->tiers[selected_tier].db_last_time_s < after_wanted) {
- // we don't have any data to satisfy this query
- return false;
- }
-
- qm->plan.used = 1;
- qm->plan.array[0].tier = selected_tier;
- qm->plan.array[0].after = (qm->tiers[selected_tier].db_first_time_s < after_wanted) ? after_wanted : qm->tiers[selected_tier].db_first_time_s;
- qm->plan.array[0].before = (qm->tiers[selected_tier].db_last_time_s > before_wanted) ? before_wanted : qm->tiers[selected_tier].db_last_time_s;
-
- if(switch_tiers) {
- // the selected tier
- time_t selected_tier_first_time_s = qm->plan.array[0].after;
- time_t selected_tier_last_time_s = qm->plan.array[0].before;
-
- // check if our selected tier can start the query
- if (selected_tier_first_time_s > after_wanted) {
- // we need some help from other tiers
- for (size_t tr = (int)selected_tier + 1; tr < storage_tiers && qm->plan.used < QUERY_PLANS_MAX ; tr++) {
- if(!query_metric_is_valid_tier(qm, tr))
- continue;
-
- // find the first time of this tier
- time_t tier_first_time_s = qm->tiers[tr].db_first_time_s;
- time_t tier_last_time_s = qm->tiers[tr].db_last_time_s;
-
- // can it help?
- if (tier_first_time_s < selected_tier_first_time_s && tier_first_time_s <= before_wanted && tier_last_time_s >= after_wanted) {
- // it can help us add detail at the beginning of the query
- QUERY_PLAN_ENTRY t = {
- .tier = tr,
- .after = (tier_first_time_s < after_wanted) ? after_wanted : tier_first_time_s,
- .before = selected_tier_first_time_s,
- };
- ops->plans[qm->plan.used].initialized = false;
- ops->plans[qm->plan.used].finalized = false;
- qm->plan.array[qm->plan.used++] = t;
-
- internal_fatal(!t.after || !t.before, "QUERY: invalid plan selected");
-
- // prepare for the tier
- selected_tier_first_time_s = t.after;
-
- if (t.after <= after_wanted)
- break;
- }
- }
- }
-
- // check if our selected tier can finish the query
- if (selected_tier_last_time_s < before_wanted) {
- // we need some help from other tiers
- for (int tr = (int)selected_tier - 1; tr >= 0 && qm->plan.used < QUERY_PLANS_MAX ; tr--) {
- if(!query_metric_is_valid_tier(qm, tr))
- continue;
-
- // find the last time of this tier
- time_t tier_first_time_s = qm->tiers[tr].db_first_time_s;
- time_t tier_last_time_s = qm->tiers[tr].db_last_time_s;
-
- //buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_s);
-
- // can it help?
- if (tier_last_time_s > selected_tier_last_time_s && tier_first_time_s <= before_wanted && tier_last_time_s >= after_wanted) {
- // it can help us add detail at the end of the query
- QUERY_PLAN_ENTRY t = {
- .tier = tr,
- .after = selected_tier_last_time_s,
- .before = (tier_last_time_s > before_wanted) ? before_wanted : tier_last_time_s,
- };
- ops->plans[qm->plan.used].initialized = false;
- ops->plans[qm->plan.used].finalized = false;
- qm->plan.array[qm->plan.used++] = t;
-
- // prepare for the tier
- selected_tier_last_time_s = t.before;
-
- internal_fatal(!t.after || !t.before, "QUERY: invalid plan selected");
-
- if (t.before >= before_wanted)
- break;
- }
- }
- }
- }
-
- // sort the query plan
- if(qm->plan.used > 1)
- qsort(&qm->plan.array, qm->plan.used, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);
-
- if(!query_metric_is_valid_tier(qm, qm->plan.array[0].tier))
- return false;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- for(size_t p = 0; p < qm->plan.used ;p++) {
- internal_fatal(qm->plan.array[p].after > qm->plan.array[p].before, "QUERY: flipped after/before");
- internal_fatal(qm->plan.array[p].after < after_wanted, "QUERY: plan starts before the requested time-frame");
- internal_fatal(qm->plan.array[p].before > before_wanted, "QUERY: plan ends after the requested time-frame");
- }
-#endif
-
- query_planer_initialize_plans(ops);
- query_planer_activate_plan(ops, 0, 0);
-
- return true;
-}
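-
-// An illustrative walk-through of the planner above: if tier 1 is selected
-// but its data starts after 'after_wanted', a higher tier (e.g. tier 2,
-// with longer retention) is added to cover the beginning of the window;
-// if tier 1 ends before 'before_wanted', a lower tier (e.g. tier 0, with
-// higher resolution) is added to cover the most recent part. The entries
-// are then sorted by start time, so plans execute in chronological order.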
-
-
-// ----------------------------------------------------------------------------
-// dimension level query engine
-
-#define query_interpolate_point(this_point, last_point, now) do { \
- if(likely( \
- /* the point to interpolate is more than 1s wide */ \
- (this_point).sp.end_time_s - (this_point).sp.start_time_s > 1 \
- \
- /* the two points are exactly next to each other */ \
- && (last_point).sp.end_time_s == (this_point).sp.start_time_s \
- \
- /* both points are valid numbers */ \
- && netdata_double_isnumber((this_point).value) \
- && netdata_double_isnumber((last_point).value) \
- \
- )) { \
- (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).sp.end_time_s - (now)) / (NETDATA_DOUBLE)((this_point).sp.end_time_s - (this_point).sp.start_time_s)); \
- (this_point).sp.end_time_s = now; \
- } \
-} while(0)
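-
-// A worked example of the interpolation above (illustrative numbers):
-// last_point ends at t=100 with value 10; this_point spans t=100..110 with
-// value 20; now=105. The weight becomes 1 - (110-105)/(110-100) = 0.5, so
-// the interpolated value is 10 + (20-10)*0.5 = 15 and this_point's end time
-// is moved to 105, i.e. the point is linearly cut at the boundary we need.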
-
-#define query_add_point_to_group(r, point, ops, add_flush) do { \
- if(likely(netdata_double_isnumber((point).value))) { \
- if(likely(fpclassify((point).value) != FP_ZERO)) \
- (ops)->group_points_non_zero++; \
- \
- if(unlikely((point).sp.flags & SN_FLAG_RESET)) \
- (ops)->group_value_flags |= RRDR_VALUE_RESET; \
- \
- time_grouping_add(r, (point).value, add_flush); \
- \
- storage_point_merge_to((ops)->group_point, (point).sp); \
- if(!(point).added) \
- storage_point_merge_to((ops)->query_point, (point).sp); \
- } \
- \
- (ops)->group_points_added++; \
-} while(0)
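-
-// A note on the macro above: the 'group_points_added' counter is advanced
-// even for NaN points, so gaps still complete their group and produce empty
-// slots, while only valid numbers feed the time-grouping function and the
-// merged storage-point statistics.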
-
-static __thread QUERY_ENGINE_OPS *released_ops = NULL;
-
-static void rrd2rrdr_query_ops_freeall(RRDR *r __maybe_unused) {
- while(released_ops) {
- QUERY_ENGINE_OPS *ops = released_ops;
- released_ops = ops->next;
-
- onewayalloc_freez(r->internal.owa, ops);
- }
-}
-
-static void rrd2rrdr_query_ops_release(QUERY_ENGINE_OPS *ops) {
- if(!ops) return;
-
- ops->next = released_ops;
- released_ops = ops;
-}
-
-static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_get(RRDR *r) {
- QUERY_ENGINE_OPS *ops;
- if(released_ops) {
- ops = released_ops;
- released_ops = ops->next;
- }
- else {
- ops = onewayalloc_mallocz(r->internal.owa, sizeof(QUERY_ENGINE_OPS));
- }
-
- memset(ops, 0, sizeof(*ops));
- return ops;
-}
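-
-// The three helpers above implement a per-thread free-list of QUERY_ENGINE_OPS:
-// release pushes an entry on the thread-local 'released_ops' stack, get pops
-// one (or allocates a new one from the query's one-way allocator), and
-// freeall returns everything to the allocator when the query completes, so
-// repeated allocations are avoided across the dimensions of the same query.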
-
-static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_prep(RRDR *r, size_t query_metric_id) {
- QUERY_TARGET *qt = r->internal.qt;
-
- QUERY_ENGINE_OPS *ops = rrd2rrdr_query_ops_get(r);
- *ops = (QUERY_ENGINE_OPS) {
- .r = r,
- .qm = query_metric(qt, query_metric_id),
- .tier_query_fetch = r->time_grouping.tier_query_fetch,
- .view_update_every = r->view.update_every,
- .query_granularity = (time_t)(r->view.update_every / r->view.group),
- .group_value_flags = RRDR_VALUE_NOTHING,
- };
-
- if(!query_plan(ops, qt->window.after, qt->window.before, qt->window.points)) {
- rrd2rrdr_query_ops_release(ops);
- return NULL;
- }
-
- return ops;
-}
-
-static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_OPS *ops) {
- QUERY_TARGET *qt = r->internal.qt;
- QUERY_METRIC *qm = ops->qm;
-
- const RRDR_TIME_GROUPING add_flush = r->time_grouping.add_flush;
-
- ops->group_point = STORAGE_POINT_UNSET;
- ops->query_point = STORAGE_POINT_UNSET;
-
- RRDR_OPTIONS options = qt->window.options;
- size_t points_wanted = qt->window.points;
- time_t after_wanted = qt->window.after;
- time_t before_wanted = qt->window.before; (void)before_wanted;
-
- size_t points_added = 0;
-
- long rrdr_line = -1;
- bool use_anomaly_bit_as_value = (r->internal.qt->window.options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
-
- NETDATA_DOUBLE min = r->view.min, max = r->view.max;
-
- QUERY_POINT last2_point = QUERY_POINT_EMPTY;
- QUERY_POINT last1_point = QUERY_POINT_EMPTY;
- QUERY_POINT new_point = QUERY_POINT_EMPTY;
-
- // ONE POINT READ-AHEAD
- // when we switch plans, we read-ahead a point from the next plan
- // to join them smoothly at the exact time the next plan begins
- STORAGE_POINT next1_point = STORAGE_POINT_UNSET;
-
- time_t now_start_time = after_wanted - ops->query_granularity;
- time_t now_end_time = after_wanted + ops->view_update_every - ops->query_granularity;
-
- size_t db_points_read_since_plan_switch = 0; (void)db_points_read_since_plan_switch;
- size_t query_is_finished_counter = 0;
-
- // The main loop, based on the query granularity we need
- for( ; points_added < points_wanted && query_is_finished_counter <= 10 ;
- now_start_time = now_end_time, now_end_time += ops->view_update_every) {
-
- if(unlikely(query_plan_should_switch_plan(ops, now_end_time))) {
- query_planer_next_plan(ops, now_end_time, new_point.sp.end_time_s);
- db_points_read_since_plan_switch = 0;
- }
-
- // read all the points of the db, prior to the time we need (now_end_time)
-
- size_t count_same_end_time = 0;
- while(count_same_end_time < 100) {
- if(likely(count_same_end_time == 0)) {
- last2_point = last1_point;
- last1_point = new_point;
- }
-
- if(unlikely(storage_engine_query_is_finished(ops->handle))) {
- query_is_finished_counter++;
-
- if(count_same_end_time != 0) {
- last2_point = last1_point;
- last1_point = new_point;
- }
- new_point = QUERY_POINT_EMPTY;
- new_point.sp.start_time_s = last1_point.sp.end_time_s;
- new_point.sp.end_time_s = now_end_time;
- break;
- }
- else
- query_is_finished_counter = 0;
-
- // fetch the new point
- {
- STORAGE_POINT sp;
- if(likely(storage_point_is_unset(next1_point))) {
- db_points_read_since_plan_switch++;
- sp = storage_engine_query_next_metric(ops->handle);
- ops->db_points_read_per_tier[ops->tier]++;
- ops->db_total_points_read++;
-
- if(unlikely(options & RRDR_OPTION_ABSOLUTE))
- storage_point_make_positive(sp);
- }
- else {
- // ONE POINT READ-AHEAD
- sp = next1_point;
- storage_point_unset(next1_point);
- db_points_read_since_plan_switch = 1;
- }
-
- // ONE POINT READ-AHEAD
- if(unlikely(query_plan_should_switch_plan(ops, sp.end_time_s) &&
- query_planer_next_plan(ops, now_end_time, new_point.sp.end_time_s))) {
-
- // The end time of the current point, crosses our plans (tiers)
- // so, we switched plan (tier)
- //
- // There are 2 cases now:
- //
- // A. the point of the previous plan lies entirely after the point from the next plan
- // B. part of the point of the previous plan overlaps with the point from the next plan
-
- STORAGE_POINT sp2 = storage_engine_query_next_metric(ops->handle);
- ops->db_points_read_per_tier[ops->tier]++;
- ops->db_total_points_read++;
-
- if(unlikely(options & RRDR_OPTION_ABSOLUTE))
- storage_point_make_positive(sp2);
-
- if(sp.start_time_s > sp2.start_time_s)
- // the point from the previous plan is useless
- sp = sp2;
- else
- // let the query run from the previous plan
- // but setting this will also cut off the interpolation
- // of the point from the previous plan
- next1_point = sp2;
- }
-
- new_point.sp = sp;
- new_point.added = false;
- query_point_set_id(new_point, ops->db_total_points_read);
-
-//
- // get the right value from the point we got
- if(likely(!storage_point_is_unset(sp) && !storage_point_is_gap(sp))) {
-
- if(unlikely(use_anomaly_bit_as_value))
- new_point.value = storage_point_anomaly_rate(new_point.sp);
-
- else {
- switch (ops->tier_query_fetch) {
- default:
- case TIER_QUERY_FETCH_AVERAGE:
- new_point.value = sp.sum / (NETDATA_DOUBLE)sp.count;
- break;
-
- case TIER_QUERY_FETCH_MIN:
- new_point.value = sp.min;
- break;
-
- case TIER_QUERY_FETCH_MAX:
- new_point.value = sp.max;
- break;
-
- case TIER_QUERY_FETCH_SUM:
- new_point.value = sp.sum;
- break;
- };
- }
- }
- else
- new_point.value = NAN;
- }
-
- // check if the db is giving us zero duration points
- if(unlikely(db_points_read_since_plan_switch > 1 &&
- new_point.sp.start_time_s == new_point.sp.end_time_s)) {
-
- internal_error(true, "QUERY: '%s', dimension '%s' next_metric() returned "
- "point %zu from %ld to %ld, that are both equal",
- qt->id, query_metric_id(qt, qm),
- new_point.id, new_point.sp.start_time_s, new_point.sp.end_time_s);
-
- new_point.sp.start_time_s = new_point.sp.end_time_s - ops->tier_ptr->db_update_every_s;
- }
-
- // check if the db is advancing the query
- if(unlikely(db_points_read_since_plan_switch > 1 &&
- new_point.sp.end_time_s <= last1_point.sp.end_time_s)) {
-
- internal_error(true,
- "QUERY: '%s', dimension '%s' next_metric() returned "
- "point %zu from %ld to %ld, before the "
- "last point %zu from %ld to %ld, "
- "now is %ld to %ld",
- qt->id, query_metric_id(qt, qm),
- new_point.id, new_point.sp.start_time_s, new_point.sp.end_time_s,
- last1_point.id, last1_point.sp.start_time_s, last1_point.sp.end_time_s,
- now_start_time, now_end_time);
-
- count_same_end_time++;
- continue;
- }
- count_same_end_time = 0;
-
- // decide how to use this point
- if(likely(new_point.sp.end_time_s < now_end_time)) { // likely to favor tier0
- // this db point ends before our now_end_time
-
- if(likely(new_point.sp.end_time_s >= now_start_time)) { // likely to favor tier0
- // this db point ends after our now_start time
-
- query_add_point_to_group(r, new_point, ops, add_flush);
- new_point.added = true;
- }
- else {
- // we don't need this db point
- // it is totally outside our current time-frame
-
- // this is desirable for the first point of the query
- // because it allows us to interpolate the next point
- // at exactly the time we will want
-
- // we only log if this is not point 1
- internal_error(new_point.sp.end_time_s < ops->plan_expanded_after &&
- db_points_read_since_plan_switch > 1,
- "QUERY: '%s', dimension '%s' next_metric() "
- "returned point %zu from %ld time %ld, "
- "which is entirely before our current timeframe %ld to %ld "
- "(and before the entire query, after %ld, before %ld)",
- qt->id, query_metric_id(qt, qm),
- new_point.id, new_point.sp.start_time_s, new_point.sp.end_time_s,
- now_start_time, now_end_time,
- ops->plan_expanded_after, ops->plan_expanded_before);
- }
-
- }
- else {
- // the point ends in the future
- // so, we will interpolate it below, at the inner loop
- break;
- }
- }
-
- if(unlikely(count_same_end_time)) {
- internal_error(true,
- "QUERY: '%s', dimension '%s', the database does not advance the query,"
- " it returned an end time less or equal to the end time of the last "
- "point we got %ld, %zu times",
- qt->id, query_metric_id(qt, qm),
- last1_point.sp.end_time_s, count_same_end_time);
-
- if(unlikely(new_point.sp.end_time_s <= last1_point.sp.end_time_s))
- new_point.sp.end_time_s = now_end_time;
- }
-
- time_t stop_time = new_point.sp.end_time_s;
- if(unlikely(!storage_point_is_unset(next1_point) && next1_point.start_time_s >= now_end_time)) {
- // ONE POINT READ-AHEAD
- // the point crosses the start time of the
- // read ahead storage point we have read
- stop_time = next1_point.start_time_s;
- }
-
- // the inner loop
- // we have 3 points in memory: last2, last1, new
- // we select the one to use based on their timestamps
-
- internal_fatal(now_end_time > stop_time || points_added >= points_wanted,
- "QUERY: the first part of the query provides an invalid point to interpolate (now_end_time %ld, stop_time %ld)",
- now_end_time, stop_time);
-
- do {
- // now_start_time is wrong in this loop
- // but, we don't need it
-
- QUERY_POINT current_point;
-
- if(likely(now_end_time > new_point.sp.start_time_s)) {
- // it is time for our NEW point to be used
- current_point = new_point;
- new_point.added = true; // first copy, then set it, so that new_point will not be added again
- query_interpolate_point(current_point, last1_point, now_end_time);
- }
- else if(likely(now_end_time <= last1_point.sp.end_time_s)) {
- // our LAST point is still valid
- current_point = last1_point;
- last1_point.added = true; // first copy, then set it, so that last1_point will not be added again
- query_interpolate_point(current_point, last2_point, now_end_time);
- }
- else {
- // a GAP, we don't have a value this time
- current_point = QUERY_POINT_EMPTY;
- }
-
- query_add_point_to_group(r, current_point, ops, add_flush);
-
- rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
- size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
-
- // find the place to store our values
- RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
-
- // update the dimension options
- if(likely(ops->group_points_non_zero))
- r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
-
- // store the specific point options
- *rrdr_value_options_ptr = ops->group_value_flags;
-
- // store the group value
- NETDATA_DOUBLE group_value = time_grouping_flush(r, rrdr_value_options_ptr, add_flush);
- r->v[rrdr_o_v_index] = group_value;
-
- r->ar[rrdr_o_v_index] = storage_point_anomaly_rate(ops->group_point);
-
- if(likely(points_added || r->internal.queries_count)) {
- // find the min/max across all dimensions
-
- if(unlikely(group_value < min)) min = group_value;
- if(unlikely(group_value > max)) max = group_value;
-
- }
- else {
- // runs only when r->internal.queries_count == 0 && points_added == 0
- // so, on the first point added for the query.
- min = max = group_value;
- }
-
- points_added++;
- ops->group_points_added = 0;
- ops->group_value_flags = RRDR_VALUE_NOTHING;
- ops->group_points_non_zero = 0;
- ops->group_point = STORAGE_POINT_UNSET;
-
- now_end_time += ops->view_update_every;
- } while(now_end_time <= stop_time && points_added < points_wanted);
-
- // the loop above increased "now" by ops->view_update_every,
- // but the main loop will increase it too,
- // so, let's undo the last iteration of this loop
- now_end_time -= ops->view_update_every;
- }
- query_planer_finalize_remaining_plans(ops);
-
- qm->query_points = ops->query_point;
-
- // fill the rest of the points with empty values
- while (points_added < points_wanted) {
- rrdr_line++;
- size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
- r->o[rrdr_o_v_index] = RRDR_VALUE_EMPTY;
- r->v[rrdr_o_v_index] = 0.0;
- r->ar[rrdr_o_v_index] = 0.0;
- points_added++;
- }
-
- r->internal.queries_count++;
- r->view.min = min;
- r->view.max = max;
-
- r->stats.result_points_generated += points_added;
- r->stats.db_points_read += ops->db_total_points_read;
- for(size_t tr = 0; tr < storage_tiers ; tr++)
- qt->db.tiers[tr].points += ops->db_points_read_per_tier[tr];
-}
-
-// ----------------------------------------------------------------------------
-// fill the gap of a tier
-
-void store_metric_at_tier(RRDDIM *rd, size_t tier, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
-
-void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now_s) {
- if(unlikely(tier >= storage_tiers)) return;
- if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;
-
- struct rrddim_tier *t = &rd->tiers[tier];
- if(unlikely(!t)) return;
-
- time_t latest_time_s = storage_engine_latest_time_s(t->backend, t->db_metric_handle);
- time_t granularity = (time_t)t->tier_grouping * (time_t)rd->rrdset->update_every;
- time_t time_diff = now_s - latest_time_s;
-
- // if the user wants only NEW backfilling, and we don't have any data
- if(storage_tiers_backfill[tier] == RRD_BACKFILL_NEW && latest_time_s <= 0) return;
-
- // there is really nothing we can do
- if(now_s <= latest_time_s || time_diff < granularity) return;
-
- struct storage_engine_query_handle handle;
-
- // for each lower tier
- for(int read_tier = (int)tier - 1; read_tier >= 0 ; read_tier--){
- time_t smaller_tier_first_time = storage_engine_oldest_time_s(rd->tiers[read_tier].backend, rd->tiers[read_tier].db_metric_handle);
- time_t smaller_tier_last_time = storage_engine_latest_time_s(rd->tiers[read_tier].backend, rd->tiers[read_tier].db_metric_handle);
- if(smaller_tier_last_time <= latest_time_s) continue; // it has nothing newer than we already have
-
- long after_wanted = (latest_time_s < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_s;
- long before_wanted = smaller_tier_last_time;
-
- struct rrddim_tier *tmp = &rd->tiers[read_tier];
- storage_engine_query_init(tmp->backend, tmp->db_metric_handle, &handle, after_wanted, before_wanted, STORAGE_PRIORITY_HIGH);
-
- size_t points_read = 0;
-
- while(!storage_engine_query_is_finished(&handle)) {
-
- STORAGE_POINT sp = storage_engine_query_next_metric(&handle);
- points_read++;
-
- if(sp.end_time_s > latest_time_s) {
- latest_time_s = sp.end_time_s;
- store_metric_at_tier(rd, tier, t, sp, sp.end_time_s * USEC_PER_SEC);
- }
- }
-
- storage_engine_query_finalize(&handle);
- store_metric_collection_completed();
- global_statistics_backfill_query_completed(points_read);
- }
-}
-
-// ----------------------------------------------------------------------------
-// fill RRDR for the whole chart
-
-#ifdef NETDATA_INTERNAL_CHECKS
-static void rrd2rrdr_log_request_response_metadata(RRDR *r
- , RRDR_OPTIONS options __maybe_unused
- , RRDR_TIME_GROUPING group_method
- , bool aligned
- , size_t group
- , time_t resampling_time
- , size_t resampling_group
- , time_t after_wanted
- , time_t after_requested
- , time_t before_wanted
- , time_t before_requested
- , size_t points_requested
- , size_t points_wanted
- //, size_t after_slot
- //, size_t before_slot
- , const char *msg
- ) {
-
- QUERY_TARGET *qt = r->internal.qt;
- time_t first_entry_s = qt->db.first_time_s;
- time_t last_entry_s = qt->db.last_time_s;
-
- internal_error(
- true,
- "rrd2rrdr() on %s update every %ld with %s grouping %s (group: %zu, resampling_time: %ld, resampling_group: %zu), "
- "after (got: %ld, want: %ld, req: %ld, db: %ld), "
- "before (got: %ld, want: %ld, req: %ld, db: %ld), "
- "duration (got: %ld, want: %ld, req: %ld, db: %ld), "
- "points (got: %zu, want: %zu, req: %zu), "
- "%s"
- , qt->id
- , qt->window.query_granularity
-
- // grouping
- , (aligned) ? "aligned" : "unaligned"
- , time_grouping_method2string(group_method)
- , group
- , resampling_time
- , resampling_group
-
- // after
- , r->view.after
- , after_wanted
- , after_requested
- , first_entry_s
-
- // before
- , r->view.before
- , before_wanted
- , before_requested
- , last_entry_s
-
- // duration
- , (long)(r->view.before - r->view.after + qt->window.query_granularity)
- , (long)(before_wanted - after_wanted + qt->window.query_granularity)
- , (long)before_requested - after_requested
- , (long)((last_entry_s - first_entry_s) + qt->window.query_granularity)
-
- // points
- , r->rows
- , points_wanted
- , points_requested
-
- // message
- , msg
- );
-}
-#endif // NETDATA_INTERNAL_CHECKS
-
-// #define DEBUG_QUERY_LOGIC 1
-
-#ifdef DEBUG_QUERY_LOGIC
-#define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
-#define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
-#define query_debug_log_fin() { \
- netdata_log_info("QUERY: '%s', after:%ld, before:%ld, duration:%ld, points:%zu, res:%ld - wanted => after:%ld, before:%ld, points:%zu, group:%zu, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", qt->id, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
- buffer_free(debug_log); \
- debug_log = NULL; \
- }
-#define query_debug_log_free() do { buffer_free(debug_log); } while(0)
-#else
-#define query_debug_log_init() debug_dummy()
-#define query_debug_log(args...) debug_dummy()
-#define query_debug_log_fin() debug_dummy()
-#define query_debug_log_free() debug_dummy()
-#endif
-
-bool query_target_calculate_window(QUERY_TARGET *qt) {
- if (unlikely(!qt)) return false;
-
- size_t points_requested = (long)qt->request.points;
- time_t after_requested = qt->request.after;
- time_t before_requested = qt->request.before;
- RRDR_TIME_GROUPING group_method = qt->request.time_group_method;
- time_t resampling_time_requested = qt->request.resampling_time;
- RRDR_OPTIONS options = qt->window.options;
- size_t tier = qt->request.tier;
- time_t update_every = qt->db.minimum_latest_update_every_s ? qt->db.minimum_latest_update_every_s : 1;
-
- // RULES
- // points_requested = 0
- // the user wants all the natural points the database has
- //
- // after_requested = 0
- // the user wants to start the query from the oldest point in our database
- //
- // before_requested = 0
- // the user wants the query to end at the latest point in our database
- //
- // when natural points are wanted, the query has to be aligned to the update_every
- // of the database
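- //
- // for example (illustrative): points = 0, after = -600, before = 0 is a
- // relative query; it is treated as natural points for the last 600
- // seconds, aligned to the update_every of the database.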
-
- size_t points_wanted = points_requested;
- time_t after_wanted = after_requested;
- time_t before_wanted = before_requested;
-
- bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
- bool automatic_natural_points = (points_wanted == 0);
- bool relative_period_requested = false;
- bool natural_points = (options & RRDR_OPTION_NATURAL_POINTS) || automatic_natural_points;
- bool before_is_aligned_to_db_end = false;
-
- query_debug_log_init();
-
- if (ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
- relative_period_requested = true;
- natural_points = true;
- options |= RRDR_OPTION_NATURAL_POINTS;
- query_debug_log(":relative+natural");
- }
-
- // if the user wants virtual points, make sure we do it
- if (options & RRDR_OPTION_VIRTUAL_POINTS)
- natural_points = false;
-
- // set the right flag about natural and virtual points
- if (natural_points) {
- options |= RRDR_OPTION_NATURAL_POINTS;
-
- if (options & RRDR_OPTION_VIRTUAL_POINTS)
- options &= ~RRDR_OPTION_VIRTUAL_POINTS;
- }
- else {
- options |= RRDR_OPTION_VIRTUAL_POINTS;
-
- if (options & RRDR_OPTION_NATURAL_POINTS)
- options &= ~RRDR_OPTION_NATURAL_POINTS;
- }
-
- if (after_wanted == 0 || before_wanted == 0) {
- relative_period_requested = true;
-
- time_t first_entry_s = qt->db.first_time_s;
- time_t last_entry_s = qt->db.last_time_s;
-
- if (first_entry_s == 0 || last_entry_s == 0) {
- internal_error(true, "QUERY: no data detected on query '%s' (db first_entry_t = %ld, last_entry_t = %ld)", qt->id, first_entry_s, last_entry_s);
- after_wanted = qt->window.after;
- before_wanted = qt->window.before;
-
- if(after_wanted == before_wanted)
- after_wanted = before_wanted - update_every;
-
- if (points_wanted == 0) {
- points_wanted = (before_wanted - after_wanted) / update_every;
- query_debug_log(":zero points_wanted %zu", points_wanted);
- }
- }
- else {
- query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_s, last_entry_s);
-
- if (after_wanted == 0) {
- after_wanted = first_entry_s;
- query_debug_log(":zero after_wanted %ld", after_wanted);
- }
-
- if (before_wanted == 0) {
- before_wanted = last_entry_s;
- before_is_aligned_to_db_end = true;
- query_debug_log(":zero before_wanted %ld", before_wanted);
- }
-
- if (points_wanted == 0) {
- points_wanted = (last_entry_s - first_entry_s) / update_every;
- query_debug_log(":zero points_wanted %zu", points_wanted);
- }
- }
- }
-
- if (points_wanted == 0) {
- points_wanted = 600;
- query_debug_log(":zero600 points_wanted %zu", points_wanted);
- }
-
- // convert our before_wanted and after_wanted to absolute
- rrdr_relative_window_to_absolute_query(&after_wanted, &before_wanted, NULL, unittest_running);
- query_debug_log(":relative2absolute after %ld, before %ld", after_wanted, before_wanted);
-
- if (natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
- update_every = rrdset_find_natural_update_every_for_timeframe(
- qt, after_wanted, before_wanted, points_wanted, options, tier);
-
- if (update_every <= 0) update_every = qt->db.minimum_latest_update_every_s;
- query_debug_log(":natural update every %ld", update_every);
- }
-
- // this is the update_every of the query
- // it may be different to the update_every of the database
- time_t query_granularity = (natural_points) ? update_every : 1;
- if (query_granularity <= 0) query_granularity = 1;
- query_debug_log(":query_granularity %ld", query_granularity);
-
- // align before_wanted and after_wanted to query_granularity
- if (before_wanted % query_granularity) {
- before_wanted -= before_wanted % query_granularity;
- query_debug_log(":granularity align before_wanted %ld", before_wanted);
- }
-
- if (after_wanted % query_granularity) {
- after_wanted -= after_wanted % query_granularity;
- query_debug_log(":granularity align after_wanted %ld", after_wanted);
- }
-
- // automatic_natural_points is set when the user wants all the points available in the database
- if (automatic_natural_points) {
- points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
- if (unlikely(points_wanted <= 0)) points_wanted = 1;
- query_debug_log(":auto natural points_wanted %zu", points_wanted);
- }
-
- time_t duration = before_wanted - after_wanted;
-
- // if the resampling time is too big, extend the duration to the past
- if (unlikely(resampling_time_requested > duration)) {
- after_wanted = before_wanted - resampling_time_requested;
- duration = before_wanted - after_wanted;
- query_debug_log(":resampling after_wanted %ld", after_wanted);
- }
-
- // if the duration is not aligned to resampling time
- // extend the duration to the past, to avoid a gap on the chart
- // only when the missing duration is above 1/10th of a point
- if (resampling_time_requested > query_granularity && duration % resampling_time_requested) {
- time_t delta = duration % resampling_time_requested;
- if (delta > resampling_time_requested / 10) {
- after_wanted -= resampling_time_requested - delta;
- duration = before_wanted - after_wanted;
- query_debug_log(":resampling2 after_wanted %ld", after_wanted);
- }
- }
-
- // the available points of the query
- size_t points_available = (duration + 1) / query_granularity;
- if (unlikely(points_available <= 0)) points_available = 1;
- query_debug_log(":points_available %zu", points_available);
-
- if (points_wanted > points_available) {
- points_wanted = points_available;
- query_debug_log(":max points_wanted %zu", points_wanted);
- }
-
- if(points_wanted > 86400 && !unittest_running) {
- points_wanted = 86400;
- query_debug_log(":absolute max points_wanted %zu", points_wanted);
- }
-
- // calculate the desired grouping of source data points
- size_t group = points_available / points_wanted;
- if (group == 0) group = 1;
-
- // round "group" to the closest integer
- if (points_available % points_wanted > points_wanted / 2)
- group++;
-
- query_debug_log(":group %zu", group);
-
- if (points_wanted * group * query_granularity < (size_t)duration) {
- // the grouping we are going to do, is not enough
- // to cover the entire duration requested, so
- // we have to change the number of points, to make sure we will
- // respect the timeframe as closely as possible
-
- // let's see how many points are the optimal
- points_wanted = points_available / group;
-
- if (points_wanted * group < points_available)
- points_wanted++;
-
- if (unlikely(points_wanted == 0))
- points_wanted = 1;
-
- query_debug_log(":optimal points %zu", points_wanted);
- }
-
- // resampling_time_requested enforces a certain grouping multiple
- NETDATA_DOUBLE resampling_divisor = 1.0;
- size_t resampling_group = 1;
- if (unlikely(resampling_time_requested > query_granularity)) {
- // the number of points we should group together to satisfy the requested resampling time
- resampling_group = resampling_time_requested / query_granularity;
- if (unlikely(resampling_time_requested % query_granularity))
- resampling_group++;
-
- query_debug_log(":resampling group %zu", resampling_group);
-
- // adapt group according to resampling_group
- if (unlikely(group < resampling_group)) {
- group = resampling_group; // do not allow grouping below the desired one
- query_debug_log(":group less res %zu", group);
- }
- if (unlikely(group % resampling_group)) {
- group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
- query_debug_log(":group mod res %zu", group);
- }
-
- // resampling_divisor = group / resampling_group;
- resampling_divisor = (NETDATA_DOUBLE) (group * query_granularity) / (NETDATA_DOUBLE) resampling_time_requested;
- query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
- }
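-
- // e.g. (illustrative): with query_granularity = 1s and a requested
- // resampling time of 60s, resampling_group becomes 60; a group of 45 is
- // raised to 60 and a group of 70 is rounded up to 120, so grouping always
- // covers whole multiples of the resampling period; the divisor (here
- // 60/60 = 1 or 120/60 = 2) records how many periods each group spans.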
-
- // now that we have group, align the requested timeframe to fit it.
- if (aligned && before_wanted % (group * query_granularity)) {
- if (before_is_aligned_to_db_end)
- before_wanted -= before_wanted % (time_t)(group * query_granularity);
- else
- before_wanted += (time_t)(group * query_granularity) - before_wanted % (time_t)(group * query_granularity);
- query_debug_log(":align before_wanted %ld", before_wanted);
- }
-
- after_wanted = before_wanted - (time_t)(points_wanted * group * query_granularity) + query_granularity;
- query_debug_log(":final after_wanted %ld", after_wanted);
-
- duration = before_wanted - after_wanted;
- query_debug_log(":final duration %ld", duration + 1);
-
- query_debug_log_fin();
-
- internal_error(points_wanted != duration / (query_granularity * group) + 1,
- "QUERY: points_wanted %zu does not match the calculated points %zu",
- points_wanted, (size_t)(duration / (query_granularity * group) + 1));
-
- internal_error(group < resampling_group,
- "QUERY: group %zu is less than the desired group points %zu",
- group, resampling_group);
-
- internal_error(group > resampling_group && group % resampling_group,
- "QUERY: group %zu is not a multiple of the desired group points %zu",
- group, resampling_group);
-
- // -------------------------------------------------------------------------
- // update QUERY_TARGET with our calculations
-
- qt->window.after = after_wanted;
- qt->window.before = before_wanted;
- qt->window.relative = relative_period_requested;
- qt->window.points = points_wanted;
- qt->window.group = group;
- qt->window.time_group_method = group_method;
- qt->window.time_group_options = qt->request.time_group_options;
- qt->window.query_granularity = query_granularity;
- qt->window.resampling_group = resampling_group;
- qt->window.resampling_divisor = resampling_divisor;
- qt->window.options = options;
- qt->window.tier = tier;
- qt->window.aligned = aligned;
-
- return true;
-}
-
-// ----------------------------------------------------------------------------
-// group by
-
-struct group_by_label_key {
- DICTIONARY *values;
-};
-
-static void group_by_label_key_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
- // add the key to our r->label_keys global keys dictionary
- DICTIONARY *label_keys = data;
- dictionary_set(label_keys, dictionary_acquired_item_name(item), NULL, 0);
-
- // create a dictionary for the values of this key
- struct group_by_label_key *k = value;
- k->values = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, 0);
-}
-
-static void group_by_label_key_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct group_by_label_key *k = value;
- dictionary_destroy(k->values);
-}
-
-static int rrdlabels_traversal_cb_to_group_by_label_key(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) {
- DICTIONARY *dl = data;
- struct group_by_label_key *k = dictionary_set(dl, name, NULL, sizeof(struct group_by_label_key));
- dictionary_set(k->values, value, NULL, 0);
- return 1;
-}
-
-void rrdr_json_group_by_labels(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
- if(!r->label_keys || !r->dl)
- return;
-
- buffer_json_member_add_object(wb, key);
-
- void *t;
- dfe_start_read(r->label_keys, t) {
- buffer_json_member_add_array(wb, t_dfe.name);
-
- for(size_t d = 0; d < r->d ;d++) {
- if(!rrdr_dimension_should_be_exposed(r->od[d], options))
- continue;
-
- struct group_by_label_key *k = dictionary_get(r->dl[d], t_dfe.name);
- if(k) {
- buffer_json_add_array_item_array(wb);
- void *tt;
- dfe_start_read(k->values, tt) {
- buffer_json_add_array_item_string(wb, tt_dfe.name);
- }
- dfe_done(tt);
- buffer_json_array_close(wb);
- }
- else
- buffer_json_add_array_item_string(wb, NULL);
- }
-
- buffer_json_array_close(wb);
- }
- dfe_done(t);
-
- buffer_json_object_close(wb); // key
-}
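-
-// A sketch of the JSON produced above (illustrative):
-// "key": { "label_name": [ ["value1","value2"], null, ... ] }
-// i.e. one array per label key, with one entry per exposed dimension:
-// either an array of the label's values for that dimension, or null when
-// the dimension does not carry that label.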
-
-static void rrd2rrdr_set_timestamps(RRDR *r) {
- QUERY_TARGET *qt = r->internal.qt;
-
- internal_fatal(qt->window.points != r->n, "QUERY: mismatch to the number of points in qt and r");
-
- r->view.group = qt->window.group;
- r->view.update_every = (int) query_view_update_every(qt);
- r->view.before = qt->window.before;
- r->view.after = qt->window.after;
-
- r->time_grouping.points_wanted = qt->window.points;
- r->time_grouping.resampling_group = qt->window.resampling_group;
- r->time_grouping.resampling_divisor = qt->window.resampling_divisor;
-
- r->rows = qt->window.points;
-
- size_t points_wanted = qt->window.points;
- time_t after_wanted = qt->window.after;
- time_t before_wanted = qt->window.before; (void)before_wanted;
-
- time_t view_update_every = r->view.update_every;
- time_t query_granularity = (time_t)(r->view.update_every / r->view.group);
-
- size_t rrdr_line = 0;
- time_t first_point_end_time = after_wanted + view_update_every - query_granularity;
- time_t now_end_time = first_point_end_time;
-
- while (rrdr_line < points_wanted) {
- r->t[rrdr_line++] = now_end_time;
- now_end_time += view_update_every;
- }
-
- internal_fatal(r->t[0] != first_point_end_time, "QUERY: wrong first timestamp in the query");
- internal_error(r->t[points_wanted - 1] != before_wanted,
- "QUERY: wrong last timestamp in the query, expected %ld, found %ld",
- before_wanted, r->t[points_wanted - 1]);
-}
-
-static void query_group_by_make_dimension_key(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_group) {
- buffer_flush(key);
- if(unlikely(!query_has_percentage_of_group && qm->status & RRDR_DIMENSION_HIDDEN)) {
- buffer_strcat(key, "__hidden_dimensions__");
- }
- else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
- buffer_strcat(key, "selected");
- }
- else {
- if (group_by & RRDR_GROUP_BY_DIMENSION) {
- buffer_fast_strcat(key, "|", 1);
- buffer_strcat(key, query_metric_name(qt, qm));
- }
-
- if (group_by & (RRDR_GROUP_BY_INSTANCE|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)) {
- buffer_fast_strcat(key, "|", 1);
- buffer_strcat(key, string2str(query_instance_id_fqdn(qi, qt->request.version)));
- }
-
- if (group_by & RRDR_GROUP_BY_LABEL) {
- RRDLABELS *labels = rrdinstance_acquired_labels(qi->ria);
- for (size_t l = 0; l < qt->group_by[group_by_id].used; l++) {
- buffer_fast_strcat(key, "|", 1);
- rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by[group_by_id].label_keys[l], "[unset]");
- }
- }
-
- if (group_by & RRDR_GROUP_BY_NODE) {
- buffer_fast_strcat(key, "|", 1);
- buffer_strcat(key, qn->rrdhost->machine_guid);
- }
-
- if (group_by & RRDR_GROUP_BY_CONTEXT) {
- buffer_fast_strcat(key, "|", 1);
- buffer_strcat(key, rrdcontext_acquired_id(qc->rca));
- }
-
- if (group_by & RRDR_GROUP_BY_UNITS) {
- buffer_fast_strcat(key, "|", 1);
- buffer_strcat(key, query_target_has_percentage_units(qt) ? "%" : rrdinstance_acquired_units(qi->ria));
- }
- }
-}
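-
-// An illustrative example of the key built above: with
-// group_by=dimension|node, the key becomes "|<dimension name>|<machine guid>",
-// a '|'-separated concatenation of the selected grouping properties; it is
-// only used to look up the group in the 'groups' dictionary further below.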
-
-static void query_group_by_make_dimension_id(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_group) {
- buffer_flush(key);
- if(unlikely(!query_has_percentage_of_group && qm->status & RRDR_DIMENSION_HIDDEN)) {
- buffer_strcat(key, "__hidden_dimensions__");
- }
- else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
- buffer_strcat(key, "selected");
- }
- else {
- if (group_by & RRDR_GROUP_BY_DIMENSION) {
- buffer_strcat(key, query_metric_name(qt, qm));
- }
-
- if (group_by & (RRDR_GROUP_BY_INSTANCE|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- if (group_by & RRDR_GROUP_BY_NODE)
- buffer_strcat(key, rrdinstance_acquired_id(qi->ria));
- else
- buffer_strcat(key, string2str(query_instance_id_fqdn(qi, qt->request.version)));
- }
-
- if (group_by & RRDR_GROUP_BY_LABEL) {
- RRDLABELS *labels = rrdinstance_acquired_labels(qi->ria);
- for (size_t l = 0; l < qt->group_by[group_by_id].used; l++) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
- rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by[group_by_id].label_keys[l], "[unset]");
- }
- }
-
- if (group_by & RRDR_GROUP_BY_NODE) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- buffer_strcat(key, qn->rrdhost->machine_guid);
- }
-
- if (group_by & RRDR_GROUP_BY_CONTEXT) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- buffer_strcat(key, rrdcontext_acquired_id(qc->rca));
- }
-
- if (group_by & RRDR_GROUP_BY_UNITS) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- buffer_strcat(key, query_target_has_percentage_units(qt) ? "%" : rrdinstance_acquired_units(qi->ria));
- }
- }
-}
-
-static void query_group_by_make_dimension_name(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_group) {
- buffer_flush(key);
- if(unlikely(!query_has_percentage_of_group && qm->status & RRDR_DIMENSION_HIDDEN)) {
- buffer_strcat(key, "__hidden_dimensions__");
- }
- else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
- buffer_strcat(key, "selected");
- }
- else {
- if (group_by & RRDR_GROUP_BY_DIMENSION) {
- buffer_strcat(key, query_metric_name(qt, qm));
- }
-
- if (group_by & (RRDR_GROUP_BY_INSTANCE|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- if (group_by & RRDR_GROUP_BY_NODE)
- buffer_strcat(key, rrdinstance_acquired_name(qi->ria));
- else
- buffer_strcat(key, string2str(query_instance_name_fqdn(qi, qt->request.version)));
- }
-
- if (group_by & RRDR_GROUP_BY_LABEL) {
- RRDLABELS *labels = rrdinstance_acquired_labels(qi->ria);
- for (size_t l = 0; l < qt->group_by[group_by_id].used; l++) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
- rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by[group_by_id].label_keys[l], "[unset]");
- }
- }
-
- if (group_by & RRDR_GROUP_BY_NODE) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- buffer_strcat(key, rrdhost_hostname(qn->rrdhost));
- }
-
- if (group_by & RRDR_GROUP_BY_CONTEXT) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- buffer_strcat(key, rrdcontext_acquired_id(qc->rca));
- }
-
- if (group_by & RRDR_GROUP_BY_UNITS) {
- if (buffer_strlen(key) != 0)
- buffer_fast_strcat(key, ",", 1);
-
- buffer_strcat(key, query_target_has_percentage_units(qt) ? "%" : rrdinstance_acquired_units(qi->ria));
- }
- }
-}
-
-struct rrdr_group_by_entry {
- size_t priority;
- size_t count;
- STRING *id;
- STRING *name;
- STRING *units;
- RRDR_DIMENSION_FLAGS od;
- DICTIONARY *dl;
-};
-
-static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
- RRDR *r_tmp = NULL;
- RRDR_OPTIONS options = qt->window.options;
-
- if(qt->request.version < 2) {
- // v1 query
- RRDR *r = rrdr_create(owa, qt, qt->query.used, qt->window.points);
- if(unlikely(!r)) {
- internal_error(true, "QUERY: cannot create RRDR for %s, after=%ld, before=%ld, dimensions=%u, points=%zu",
- qt->id, qt->window.after, qt->window.before, qt->query.used, qt->window.points);
- return NULL;
- }
- r->group_by.r = NULL;
-
- for(size_t d = 0; d < qt->query.used ; d++) {
- QUERY_METRIC *qm = query_metric(qt, d);
- QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
- r->di[d] = rrdmetric_acquired_id_dup(qd->rma);
- r->dn[d] = rrdmetric_acquired_name_dup(qd->rma);
- }
-
- rrd2rrdr_set_timestamps(r);
- return r;
- }
- // v2 query
-
- // parse all the group-by label keys
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
- if (qt->request.group_by[g].group_by & RRDR_GROUP_BY_LABEL &&
- qt->request.group_by[g].group_by_label && *qt->request.group_by[g].group_by_label)
- qt->group_by[g].used = quoted_strings_splitter_query_group_by_label(
- qt->request.group_by[g].group_by_label, qt->group_by[g].label_keys,
- GROUP_BY_MAX_LABEL_KEYS);
-
- if (!qt->group_by[g].used)
- qt->request.group_by[g].group_by &= ~RRDR_GROUP_BY_LABEL;
- }
-
- // make sure there are valid group-by methods
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
- if(!(qt->request.group_by[g].group_by & SUPPORTED_GROUP_BY_METHODS))
- qt->request.group_by[g].group_by = (g == 0) ? RRDR_GROUP_BY_DIMENSION : RRDR_GROUP_BY_NONE;
- }
-
- bool query_has_percentage_of_group = query_target_has_percentage_of_group(qt);
-
- // merge all group-by options towards the outer passes,
- // so that the first pass has all the groupings of the passes after it,
- // and each subsequent pass has all the groupings of the passes that follow it.
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES - 1 ;g++) {
- if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
- continue;
-
- if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_SELECTED) {
- for (size_t r = g + 1; r < MAX_QUERY_GROUP_BY_PASSES; r++)
- qt->request.group_by[r].group_by = RRDR_GROUP_BY_NONE;
- }
- else {
- for (size_t r = g + 1; r < MAX_QUERY_GROUP_BY_PASSES; r++) {
- if (qt->request.group_by[r].group_by == RRDR_GROUP_BY_NONE)
- continue;
-
- if (qt->request.group_by[r].group_by != RRDR_GROUP_BY_SELECTED) {
- if(qt->request.group_by[r].group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
- qt->request.group_by[g].group_by |= RRDR_GROUP_BY_INSTANCE;
- else
- qt->request.group_by[g].group_by |= qt->request.group_by[r].group_by;
-
- if(qt->request.group_by[r].group_by & RRDR_GROUP_BY_LABEL) {
- for (size_t lr = 0; lr < qt->group_by[r].used; lr++) {
- bool found = false;
- for (size_t lg = 0; lg < qt->group_by[g].used; lg++) {
- if (strcmp(qt->group_by[g].label_keys[lg], qt->group_by[r].label_keys[lr]) == 0) {
- found = true;
- break;
- }
- }
-
- if (!found && qt->group_by[g].used < GROUP_BY_MAX_LABEL_KEYS * MAX_QUERY_GROUP_BY_PASSES)
- qt->group_by[g].label_keys[qt->group_by[g].used++] = qt->group_by[r].label_keys[lr];
- }
- }
- }
- }
- }
- }
-
- int added = 0;
- RRDR *first_r = NULL, *last_r = NULL;
- BUFFER *key = buffer_create(0, NULL);
- struct rrdr_group_by_entry *entries = onewayalloc_mallocz(owa, qt->query.used * sizeof(struct rrdr_group_by_entry));
- DICTIONARY *groups = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
- DICTIONARY *label_keys = NULL;
-
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
- RRDR_GROUP_BY group_by = qt->request.group_by[g].group_by;
- RRDR_GROUP_BY_FUNCTION aggregation_method = qt->request.group_by[g].aggregation;
-
- if(group_by == RRDR_GROUP_BY_NONE)
- break;
-
- memset(entries, 0, qt->query.used * sizeof(struct rrdr_group_by_entry));
- dictionary_flush(groups);
- added = 0;
-
- size_t hidden_dimensions = 0;
- bool final_grouping = (g == MAX_QUERY_GROUP_BY_PASSES - 1 || qt->request.group_by[g + 1].group_by == RRDR_GROUP_BY_NONE) ? true : false;
-
- if (final_grouping && (options & RRDR_OPTION_GROUP_BY_LABELS))
- label_keys = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, 0);
-
- QUERY_INSTANCE *last_qi = NULL;
- size_t priority = 0;
- time_t update_every_max = 0;
- for (size_t d = 0; d < qt->query.used; d++) {
- QUERY_METRIC *qm = query_metric(qt, d);
- QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
- QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
- QUERY_CONTEXT *qc = query_context(qt, qm->link.query_context_id);
- QUERY_NODE *qn = query_node(qt, qm->link.query_node_id);
-
- if (qi != last_qi) {
- last_qi = qi;
-
- time_t update_every = rrdinstance_acquired_update_every(qi->ria);
- if (update_every > update_every_max)
- update_every_max = update_every;
- }
-
- priority = qd->priority;
-
- if(qm->status & RRDR_DIMENSION_HIDDEN)
- hidden_dimensions++;
-
- // --------------------------------------------------------------------
- // generate the group by key
-
- query_group_by_make_dimension_key(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_group);
-
- // lookup the key in the dictionary
-
- int pos = -1;
- int *set = dictionary_set(groups, buffer_tostring(key), &pos, sizeof(pos));
- if (*set == -1) {
- // the key just added to the dictionary
-
- *set = pos = added++;
-
- // ----------------------------------------------------------------
- // generate the dimension id
-
- query_group_by_make_dimension_id(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_group);
- entries[pos].id = string_strdupz(buffer_tostring(key));
-
- // ----------------------------------------------------------------
- // generate the dimension name
-
- query_group_by_make_dimension_name(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_group);
- entries[pos].name = string_strdupz(buffer_tostring(key));
-
- // add the rest of the info
- entries[pos].units = rrdinstance_acquired_units_dup(qi->ria);
- entries[pos].priority = priority;
-
- if (label_keys) {
- entries[pos].dl = dictionary_create_advanced(
- DICT_OPTION_SINGLE_THREADED | DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE,
- NULL, sizeof(struct group_by_label_key));
- dictionary_register_insert_callback(entries[pos].dl, group_by_label_key_insert_cb, label_keys);
- dictionary_register_delete_callback(entries[pos].dl, group_by_label_key_delete_cb, label_keys);
- }
- } else {
- // the key found in the dictionary
- pos = *set;
- }
-
- entries[pos].count++;
-
- if (unlikely(priority < entries[pos].priority))
- entries[pos].priority = priority;
-
- if(g > 0)
- last_r->dgbs[qm->grouped_as.slot] = pos;
- else
- qm->grouped_as.first_slot = pos;
-
- qm->grouped_as.slot = pos;
- qm->grouped_as.id = entries[pos].id;
- qm->grouped_as.name = entries[pos].name;
- qm->grouped_as.units = entries[pos].units;
-
- // copy the dimension flags decided by the query target
- // we need this, because if a dimension is explicitly selected
- // the query target adds to it the non-zero flag
- qm->status |= RRDR_DIMENSION_GROUPED;
-
- if(query_has_percentage_of_group)
- // when the query has percentage of group
- // there will be no hidden dimensions in the final query,
- // so we have to remove the hidden flag from all dimensions
- entries[pos].od |= qm->status & ~RRDR_DIMENSION_HIDDEN;
- else
- entries[pos].od |= qm->status;
-
- if (entries[pos].dl)
- rrdlabels_walkthrough_read(rrdinstance_acquired_labels(qi->ria),
- rrdlabels_traversal_cb_to_group_by_label_key, entries[pos].dl);
- }
-
- RRDR *r = rrdr_create(owa, qt, added, qt->window.points);
- if (!r) {
- internal_error(true,
- "QUERY: cannot create group by RRDR for %s, after=%ld, before=%ld, dimensions=%d, points=%zu",
- qt->id, qt->window.after, qt->window.before, added, qt->window.points);
- goto cleanup;
- }
- // prevent double free at cleanup in case of error
- added = 0;
-
- // link this RRDR
- if(!last_r)
- first_r = last_r = r;
- else
- last_r->group_by.r = r;
-
- last_r = r;
-
- rrd2rrdr_set_timestamps(r);
- r->dp = onewayalloc_callocz(owa, r->d, sizeof(*r->dp));
- r->dview = onewayalloc_callocz(owa, r->d, sizeof(*r->dview));
- r->dgbc = onewayalloc_callocz(owa, r->d, sizeof(*r->dgbc));
- r->gbc = onewayalloc_callocz(owa, r->n * r->d, sizeof(*r->gbc));
- r->dqp = onewayalloc_callocz(owa, r->d, sizeof(STORAGE_POINT));
-
- if(hidden_dimensions && ((group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE) || (aggregation_method == RRDR_GROUP_BY_FUNCTION_PERCENTAGE)))
- // this is where we are going to group the hidden dimensions
- r->vh = onewayalloc_mallocz(owa, r->n * r->d * sizeof(*r->vh));
-
- if(!final_grouping)
- // this is where we are going to store the slot in the next RRDR
- // that we are going to group by the dimension of this RRDR
- r->dgbs = onewayalloc_callocz(owa, r->d, sizeof(*r->dgbs));
-
- if (label_keys) {
- r->dl = onewayalloc_callocz(owa, r->d, sizeof(DICTIONARY *));
- r->label_keys = label_keys;
- label_keys = NULL;
- }
-
- // copy the group-by entries into r (dimension options, names, and ids)
- // this is required, because group-by may lead to empty dimensions
- for (size_t d = 0; d < r->d; d++) {
- r->di[d] = entries[d].id;
- r->dn[d] = entries[d].name;
-
- r->od[d] = entries[d].od;
- r->du[d] = entries[d].units;
- r->dp[d] = entries[d].priority;
- r->dgbc[d] = entries[d].count;
-
- if (r->dl)
- r->dl[d] = entries[d].dl;
- }
-
- // initialize partial trimming
- r->partial_data_trimming.max_update_every = update_every_max * 2;
- r->partial_data_trimming.expected_after =
- (!query_target_aggregatable(qt) &&
- qt->window.before >= qt->window.now - r->partial_data_trimming.max_update_every) ?
- qt->window.before - r->partial_data_trimming.max_update_every :
- qt->window.before;
- r->partial_data_trimming.trimmed_after = qt->window.before;
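-
- // e.g. (illustrative): if the maximum update_every across the grouped
- // instances is 10s, points within the last 20s of the window become
- // candidates for trimming, presumably because the latest samples may not
- // have been collected on all instances yet; aggregatable queries keep
- // expected_after equal to the window end, so nothing is trimmed.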
-
- // make all values empty
- for (size_t i = 0; i != r->n; i++) {
- NETDATA_DOUBLE *cn = &r->v[i * r->d];
- RRDR_VALUE_FLAGS *co = &r->o[i * r->d];
- NETDATA_DOUBLE *ar = &r->ar[i * r->d];
- NETDATA_DOUBLE *vh = r->vh ? &r->vh[i * r->d] : NULL;
-
- for (size_t d = 0; d < r->d; d++) {
- cn[d] = NAN;
- ar[d] = 0.0;
- co[d] = RRDR_VALUE_EMPTY;
-
- if(vh)
- vh[d] = NAN;
- }
- }
- }
-
- if(!first_r || !last_r)
- goto cleanup;
-
- r_tmp = rrdr_create(owa, qt, 1, qt->window.points);
- if (!r_tmp) {
- internal_error(true,
- "QUERY: cannot create group by temporary RRDR for %s, after=%ld, before=%ld, dimensions=%d, points=%zu",
- qt->id, qt->window.after, qt->window.before, 1, qt->window.points);
- goto cleanup;
- }
- rrd2rrdr_set_timestamps(r_tmp);
- r_tmp->group_by.r = first_r;
-
-cleanup:
- if(!first_r || !last_r || !r_tmp) {
- if(r_tmp) {
- r_tmp->group_by.r = NULL;
- rrdr_free(owa, r_tmp);
- }
-
- if(first_r) {
- RRDR *r = first_r;
- while (r) {
- r_tmp = r->group_by.r;
- r->group_by.r = NULL;
- rrdr_free(owa, r);
- r = r_tmp;
- }
- }
-
- if(entries && added) {
- for (int d = 0; d < added; d++) {
- string_freez(entries[d].id);
- string_freez(entries[d].name);
- string_freez(entries[d].units);
- dictionary_destroy(entries[d].dl);
- }
- }
- dictionary_destroy(label_keys);
-
- first_r = last_r = r_tmp = NULL;
- }
-
- buffer_free(key);
- onewayalloc_freez(owa, entries);
- dictionary_destroy(groups);
-
- return r_tmp;
-}
-
-static void rrd2rrdr_group_by_add_metric(RRDR *r_dst, size_t d_dst, RRDR *r_tmp, size_t d_tmp,
- RRDR_GROUP_BY_FUNCTION group_by_aggregate_function,
- STORAGE_POINT *query_points, size_t pass __maybe_unused) {
- if(!r_tmp || r_dst == r_tmp || !(r_tmp->od[d_tmp] & RRDR_DIMENSION_QUERIED))
- return;
-
- internal_fatal(r_dst->n != r_tmp->n, "QUERY: group-by source and destination do not have the same number of rows");
- internal_fatal(d_dst >= r_dst->d, "QUERY: group-by destination dimension number exceeds destination RRDR size");
- internal_fatal(d_tmp >= r_tmp->d, "QUERY: group-by source dimension number exceeds source RRDR size");
- internal_fatal(!r_dst->dqp, "QUERY: group-by destination is not properly prepared (missing dqp array)");
- internal_fatal(!r_dst->gbc, "QUERY: group-by destination is not properly prepared (missing gbc array)");
-
- bool hidden_dimension_on_percentage_of_group = (r_tmp->od[d_tmp] & RRDR_DIMENSION_HIDDEN) && r_dst->vh;
-
- if(!hidden_dimension_on_percentage_of_group) {
- r_dst->od[d_dst] |= r_tmp->od[d_tmp];
- storage_point_merge_to(r_dst->dqp[d_dst], *query_points);
- }
-
- // do the group_by
- for(size_t i = 0; i != rrdr_rows(r_tmp) ; i++) {
-
- size_t idx_tmp = i * r_tmp->d + d_tmp;
- NETDATA_DOUBLE n_tmp = r_tmp->v[ idx_tmp ];
- RRDR_VALUE_FLAGS o_tmp = r_tmp->o[ idx_tmp ];
- NETDATA_DOUBLE ar_tmp = r_tmp->ar[ idx_tmp ];
-
- if(o_tmp & RRDR_VALUE_EMPTY)
- continue;
-
- size_t idx_dst = i * r_dst->d + d_dst;
- NETDATA_DOUBLE *cn = (hidden_dimension_on_percentage_of_group) ? &r_dst->vh[ idx_dst ] : &r_dst->v[ idx_dst ];
- RRDR_VALUE_FLAGS *co = &r_dst->o[ idx_dst ];
- NETDATA_DOUBLE *ar = &r_dst->ar[ idx_dst ];
- uint32_t *gbc = &r_dst->gbc[ idx_dst ];
-
- switch(group_by_aggregate_function) {
- default:
- case RRDR_GROUP_BY_FUNCTION_AVERAGE:
- case RRDR_GROUP_BY_FUNCTION_SUM:
- case RRDR_GROUP_BY_FUNCTION_PERCENTAGE:
- if(isnan(*cn))
- *cn = n_tmp;
- else
- *cn += n_tmp;
- break;
-
- case RRDR_GROUP_BY_FUNCTION_MIN:
- if(isnan(*cn) || n_tmp < *cn)
- *cn = n_tmp;
- break;
-
- case RRDR_GROUP_BY_FUNCTION_MAX:
- if(isnan(*cn) || n_tmp > *cn)
- *cn = n_tmp;
- break;
- }
-
- if(!hidden_dimension_on_percentage_of_group) {
- *co &= ~RRDR_VALUE_EMPTY;
- *co |= (o_tmp & (RRDR_VALUE_RESET | RRDR_VALUE_PARTIAL));
- *ar += ar_tmp;
- (*gbc)++;
- }
- }
-}
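-
-// A note on the aggregation above: for RRDR_GROUP_BY_FUNCTION_AVERAGE the
-// loop only accumulates the sum, while the per-cell counter 'gbc' tracks
-// how many sources contributed; the division by that count is presumably
-// applied later, when the group-by passes are finalized.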
-
-static void rrdr2rrdr_group_by_partial_trimming(RRDR *r) {
- time_t trimmable_after = r->partial_data_trimming.expected_after;
-
- // find the point just before the trimmable ones
- ssize_t i = (ssize_t)r->n - 1;
- for( ; i >= 0 ;i--) {
- if (r->t[i] < trimmable_after)
- break;
- }
-
- if(unlikely(i < 0))
- return;
-
- // internal_error(true, "Found trimmable index %zd (from 0 to %zu)", i, r->n - 1);
-
- size_t last_row_gbc = 0;
- for (; i < (ssize_t)r->n; i++) {
- size_t row_gbc = 0;
- for (size_t d = 0; d < r->d; d++) {
- if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
- continue;
-
- row_gbc += r->gbc[ i * r->d + d ];
- }
-
- // internal_error(true, "GBC of index %zd is %zu", i, row_gbc);
-
- if (unlikely(r->t[i] >= trimmable_after && (row_gbc < last_row_gbc || !row_gbc))) {
- // discard the rest of the points
- // internal_error(true, "Discarding points %zd to %zu", i, r->n - 1);
- r->partial_data_trimming.trimmed_after = r->t[i];
- r->rows = i;
- break;
- }
- else
- last_row_gbc = row_gbc;
- }
-}
-
-static void rrdr2rrdr_group_by_calculate_percentage_of_group(RRDR *r) {
- if(!r->vh)
- return;
-
- if(query_target_aggregatable(r->internal.qt) && query_has_group_by_aggregation_percentage(r->internal.qt))
- return;
-
- for(size_t i = 0; i < r->n ;i++) {
- NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
- NETDATA_DOUBLE *ch = &r->vh[ i * r->d ];
-
- for(size_t d = 0; d < r->d ;d++) {
- NETDATA_DOUBLE n = cn[d];
- NETDATA_DOUBLE h = ch[d];
-
- if(isnan(n))
- cn[d] = 0.0;
-
- else if(isnan(h))
- cn[d] = 100.0;
-
- else
- cn[d] = n * 100.0 / (n + h);
- }
- }
-}
-
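-// When RRDR_OPTION_PERCENTAGE is set (and the output is not aggregatable),
-// rescale every row so each queried dimension becomes its share (%) of the
-// row total, updating the view's global min/max and, for v2 queries, the
-// per-dimension statistics in 'dview'.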
-static void rrd2rrdr_convert_values_to_percentage_of_total(RRDR *r) {
- if(!(r->internal.qt->window.options & RRDR_OPTION_PERCENTAGE) || query_target_aggregatable(r->internal.qt))
- return;
-
- size_t global_min_max_values = 0;
- NETDATA_DOUBLE global_min = NAN, global_max = NAN;
-
- for(size_t i = 0; i != r->n ;i++) {
- NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
- RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
-
- NETDATA_DOUBLE total = 0;
- for (size_t d = 0; d < r->d; d++) {
- if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
- continue;
-
- if(co[d] & RRDR_VALUE_EMPTY)
- continue;
-
- total += cn[d];
- }
-
- if(total == 0.0)
- total = 1.0;
-
- for (size_t d = 0; d < r->d; d++) {
- if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
- continue;
-
- if(co[d] & RRDR_VALUE_EMPTY)
- continue;
-
- NETDATA_DOUBLE n = cn[d];
- n = cn[d] = n * 100.0 / total;
-
- if(unlikely(!global_min_max_values++))
- global_min = global_max = n;
- else {
- if(n < global_min)
- global_min = n;
- if(n > global_max)
- global_max = n;
- }
- }
- }
-
- r->view.min = global_min;
- r->view.max = global_max;
-
- if(!r->dview)
- // v1 query
- return;
-
- // v2 query
-
- for (size_t d = 0; d < r->d; d++) {
- if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
- continue;
-
- size_t count = 0;
- NETDATA_DOUBLE min = 0.0, max = 0.0, sum = 0.0, ars = 0.0;
- for(size_t i = 0; i != r->rows ;i++) { // we use r->rows to respect trimming
- size_t idx = i * r->d + d;
-
- RRDR_VALUE_FLAGS o = r->o[ idx ];
-
- if (o & RRDR_VALUE_EMPTY)
- continue;
-
- NETDATA_DOUBLE ar = r->ar[ idx ];
- ars += ar;
-
- NETDATA_DOUBLE n = r->v[ idx ];
- sum += n;
-
- if(!count++)
- min = max = n;
- else {
- if(n < min)
- min = n;
- if(n > max)
- max = n;
- }
- }
-
- r->dview[d] = (STORAGE_POINT) {
- .sum = sum,
- .count = count,
- .min = min,
- .max = max,
- .anomaly_count = (size_t)(ars * (NETDATA_DOUBLE)count),
- };
- }
-}
-
-static RRDR *rrd2rrdr_group_by_finalize(RRDR *r_tmp) {
- QUERY_TARGET *qt = r_tmp->internal.qt;
-
- if(!r_tmp->group_by.r) {
- // v1 query
- rrd2rrdr_convert_values_to_percentage_of_total(r_tmp);
- return r_tmp;
- }
- // v2 query
-
- // do the additional passes on RRDRs
- RRDR *last_r = r_tmp->group_by.r;
- rrdr2rrdr_group_by_calculate_percentage_of_group(last_r);
-
- RRDR *r = last_r->group_by.r;
- size_t pass = 0;
- while(r) {
- pass++;
- for(size_t d = 0; d < last_r->d ;d++) {
- rrd2rrdr_group_by_add_metric(r, last_r->dgbs[d], last_r, d,
- qt->request.group_by[pass].aggregation,
- &last_r->dqp[d], pass);
- }
- rrdr2rrdr_group_by_calculate_percentage_of_group(r);
-
- last_r = r;
- r = last_r->group_by.r;
- }
-
- // free all RRDRs except the last one
- r = r_tmp;
- while(r != last_r) {
- r_tmp = r->group_by.r;
- r->group_by.r = NULL;
- rrdr_free(r->internal.owa, r);
- r = r_tmp;
- }
- r = last_r;
-
- // find the final aggregation
- RRDR_GROUP_BY_FUNCTION aggregation = qt->request.group_by[0].aggregation;
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++)
- if(qt->request.group_by[g].group_by != RRDR_GROUP_BY_NONE)
- aggregation = qt->request.group_by[g].aggregation;
-
- if(!query_target_aggregatable(qt) && r->partial_data_trimming.expected_after < qt->window.before)
- rrdr2rrdr_group_by_partial_trimming(r);
-
- // apply averaging, remove RRDR_VALUE_EMPTY, find the non-zero dimensions, min and max
- size_t global_min_max_values = 0;
- size_t dimensions_nonzero = 0;
- NETDATA_DOUBLE global_min = NAN, global_max = NAN;
- for (size_t d = 0; d < r->d; d++) {
- if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
- continue;
-
- size_t points_nonzero = 0;
- NETDATA_DOUBLE min = 0, max = 0, sum = 0, ars = 0;
- size_t count = 0;
-
- for(size_t i = 0; i != r->n ;i++) {
- size_t idx = i * r->d + d;
-
- NETDATA_DOUBLE *cn = &r->v[ idx ];
- RRDR_VALUE_FLAGS *co = &r->o[ idx ];
- NETDATA_DOUBLE *ar = &r->ar[ idx ];
- uint32_t gbc = r->gbc[ idx ];
-
- if(likely(gbc)) {
- *co &= ~RRDR_VALUE_EMPTY;
-
- if(gbc != r->dgbc[d])
- *co |= RRDR_VALUE_PARTIAL;
-
- NETDATA_DOUBLE n;
-
- sum += *cn;
- ars += *ar;
-
- if(aggregation == RRDR_GROUP_BY_FUNCTION_AVERAGE && !query_target_aggregatable(qt))
- n = (*cn /= gbc);
- else
- n = *cn;
-
- if(!query_target_aggregatable(qt))
- *ar /= gbc;
-
- if(islessgreater(n, 0.0))
- points_nonzero++;
-
- if(unlikely(!count))
- min = max = n;
- else {
- if(n < min)
- min = n;
-
- if(n > max)
- max = n;
- }
-
- if(unlikely(!global_min_max_values++))
- global_min = global_max = n;
- else {
- if(n < global_min)
- global_min = n;
-
- if(n > global_max)
- global_max = n;
- }
-
- count += gbc;
- }
- }
-
- if(points_nonzero) {
- r->od[d] |= RRDR_DIMENSION_NONZERO;
- dimensions_nonzero++;
- }
-
- r->dview[d] = (STORAGE_POINT) {
- .sum = sum,
- .count = count,
- .min = min,
- .max = max,
- .anomaly_count = (size_t)(ars * RRDR_DVIEW_ANOMALY_COUNT_MULTIPLIER / 100.0),
- };
- }
-
- r->view.min = global_min;
- r->view.max = global_max;
-
- if(!dimensions_nonzero && (qt->window.options & RRDR_OPTION_NONZERO)) {
- // all dimensions are zero
- // remove the nonzero option
- qt->window.options &= ~RRDR_OPTION_NONZERO;
- }
-
- rrd2rrdr_convert_values_to_percentage_of_total(r);
-
- // update query instance counts in query host and query context
- {
- size_t h = 0, c = 0, i = 0;
- for(; h < qt->nodes.used ; h++) {
- QUERY_NODE *qn = &qt->nodes.array[h];
-
- for(; c < qt->contexts.used ;c++) {
- QUERY_CONTEXT *qc = &qt->contexts.array[c];
-
- if(!rrdcontext_acquired_belongs_to_host(qc->rca, qn->rrdhost))
- break;
-
- for(; i < qt->instances.used ;i++) {
- QUERY_INSTANCE *qi = &qt->instances.array[i];
-
- if(!rrdinstance_acquired_belongs_to_context(qi->ria, qc->rca))
- break;
-
- if(qi->metrics.queried) {
- qc->instances.queried++;
- qn->instances.queried++;
- }
- else if(qi->metrics.failed) {
- qc->instances.failed++;
- qn->instances.failed++;
- }
- }
- }
- }
- }
-
- return r;
-}
-
-// ----------------------------------------------------------------------------
-// query entry point
-
-RRDR *rrd2rrdr_legacy(
- ONEWAYALLOC *owa,
- RRDSET *st, size_t points, time_t after, time_t before,
- RRDR_TIME_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
- const char *group_options, time_t timeout_ms, size_t tier, QUERY_SOURCE query_source,
- STORAGE_PRIORITY priority) {
-
- QUERY_TARGET_REQUEST qtr = {
- .version = 1,
- .st = st,
- .points = points,
- .after = after,
- .before = before,
- .time_group_method = group_method,
- .resampling_time = resampling_time,
- .options = options,
- .dimensions = dimensions,
- .time_group_options = group_options,
- .timeout_ms = timeout_ms,
- .tier = tier,
- .query_source = query_source,
- .priority = priority,
- };
-
- QUERY_TARGET *qt = query_target_create(&qtr);
- RRDR *r = rrd2rrdr(owa, qt);
- if(!r) {
- query_target_release(qt);
- return NULL;
- }
-
- r->internal.release_with_rrdr_qt = qt;
- return r;
-}
-
-RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
- if(!qt || !owa)
- return NULL;
-
- // qt.window members are the WANTED ones.
- // qt.request members are the REQUESTED ones.
-
- RRDR *r_tmp = rrd2rrdr_group_by_initialize(owa, qt);
- if(!r_tmp)
- return NULL;
-
- // the RRDR we group-by at
- RRDR *r = (r_tmp->group_by.r) ? r_tmp->group_by.r : r_tmp;
-
- // the final RRDR to return to callers
- RRDR *last_r = r_tmp;
- while(last_r->group_by.r)
- last_r = last_r->group_by.r;
-
- if(qt->window.relative)
- last_r->view.flags |= RRDR_RESULT_FLAG_RELATIVE;
- else
- last_r->view.flags |= RRDR_RESULT_FLAG_ABSOLUTE;
-
- // -------------------------------------------------------------------------
- // assign the processor functions
- rrdr_set_grouping_function(r_tmp, qt->window.time_group_method);
-
- // allocate any memory required by the grouping method
- r_tmp->time_grouping.create(r_tmp, qt->window.time_group_options);
-
- // -------------------------------------------------------------------------
- // do the work for each dimension
-
- time_t max_after = 0, min_before = 0;
- size_t max_rows = 0;
-
- long dimensions_used = 0, dimensions_nonzero = 0;
- size_t last_db_points_read = 0;
- size_t last_result_points_generated = 0;
-
- internal_fatal(released_ops, "QUERY: released_ops should be NULL when the query starts");
-
- QUERY_ENGINE_OPS **ops = NULL;
- if(qt->query.used)
- ops = onewayalloc_callocz(owa, qt->query.used, sizeof(QUERY_ENGINE_OPS *));
-
- size_t capacity = libuv_worker_threads * 10;
- size_t max_queries_to_prepare = (qt->query.used > (capacity - 1)) ? (capacity - 1) : qt->query.used;
- size_t queries_prepared = 0;
- while(queries_prepared < max_queries_to_prepare) {
- // preload another query
- ops[queries_prepared] = rrd2rrdr_query_ops_prep(r_tmp, queries_prepared);
- queries_prepared++;
- }
-
- QUERY_NODE *last_qn = NULL;
- usec_t last_ut = now_monotonic_usec();
- usec_t last_qn_ut = last_ut;
-
- for(size_t d = 0; d < qt->query.used ; d++) {
- QUERY_METRIC *qm = query_metric(qt, d);
- QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
- QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
- QUERY_CONTEXT *qc = query_context(qt, qm->link.query_context_id);
- QUERY_NODE *qn = query_node(qt, qm->link.query_node_id);
-
- usec_t now_ut = last_ut;
- if(qn != last_qn) {
- if(last_qn)
- last_qn->duration_ut = now_ut - last_qn_ut;
-
- last_qn = qn;
- last_qn_ut = now_ut;
- }
-
- if(queries_prepared < qt->query.used) {
- // preload another query
- ops[queries_prepared] = rrd2rrdr_query_ops_prep(r_tmp, queries_prepared);
- queries_prepared++;
- }
-
- size_t dim_in_rrdr_tmp = (r_tmp != r) ? 0 : d;
-
- // set the query target dimension options to rrdr
- r_tmp->od[dim_in_rrdr_tmp] = qm->status;
-
- // reset the grouping for the new dimension
- r_tmp->time_grouping.reset(r_tmp);
-
- if(ops[d]) {
- rrd2rrdr_query_execute(r_tmp, dim_in_rrdr_tmp, ops[d]);
- r_tmp->od[dim_in_rrdr_tmp] |= RRDR_DIMENSION_QUERIED;
-
- now_ut = now_monotonic_usec();
- qm->duration_ut = now_ut - last_ut;
- last_ut = now_ut;
-
- if(r_tmp != r) {
- // copy back whatever got updated from the temporary r
-
- // the query updates RRDR_DIMENSION_NONZERO
- qm->status = r_tmp->od[dim_in_rrdr_tmp];
-
- // the query updates these
- r->view.min = r_tmp->view.min;
- r->view.max = r_tmp->view.max;
- r->view.after = r_tmp->view.after;
- r->view.before = r_tmp->view.before;
- r->rows = r_tmp->rows;
-
- rrd2rrdr_group_by_add_metric(r, qm->grouped_as.first_slot, r_tmp, dim_in_rrdr_tmp,
- qt->request.group_by[0].aggregation, &qm->query_points, 0);
- }
-
- rrd2rrdr_query_ops_release(ops[d]); // reuse this ops allocation
- ops[d] = NULL;
-
- qi->metrics.queried++;
- qc->metrics.queried++;
- qn->metrics.queried++;
-
- qd->status |= QUERY_STATUS_QUERIED;
- qm->status |= RRDR_DIMENSION_QUERIED;
-
- if(qt->request.version >= 2) {
- // we need to make the query points positive now
- // since we will aggregate it across multiple dimensions
- storage_point_make_positive(qm->query_points);
- storage_point_merge_to(qi->query_points, qm->query_points);
- storage_point_merge_to(qc->query_points, qm->query_points);
- storage_point_merge_to(qn->query_points, qm->query_points);
- storage_point_merge_to(qt->query_points, qm->query_points);
- }
- }
- else {
- qi->metrics.failed++;
- qc->metrics.failed++;
- qn->metrics.failed++;
-
- qd->status |= QUERY_STATUS_FAILED;
- qm->status |= RRDR_DIMENSION_FAILED;
-
- continue;
- }
-
- global_statistics_rrdr_query_completed(
- 1,
- r_tmp->stats.db_points_read - last_db_points_read,
- r_tmp->stats.result_points_generated - last_result_points_generated,
- qt->request.query_source);
-
- last_db_points_read = r_tmp->stats.db_points_read;
- last_result_points_generated = r_tmp->stats.result_points_generated;
-
- if(qm->status & RRDR_DIMENSION_NONZERO)
- dimensions_nonzero++;
-
- // verify all dimensions are aligned
- if(unlikely(!dimensions_used)) {
- min_before = r->view.before;
- max_after = r->view.after;
- max_rows = r->rows;
- }
- else {
- if(r->view.after != max_after) {
- internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- rrdinstance_acquired_id(qi->ria), (size_t)max_after, rrdmetric_acquired_id(qd->rma), (size_t)r->view.after);
-
- r->view.after = (r->view.after > max_after) ? r->view.after : max_after;
- }
-
- if(r->view.before != min_before) {
- internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- rrdinstance_acquired_id(qi->ria), (size_t)min_before, rrdmetric_acquired_id(qd->rma), (size_t)r->view.before);
-
- r->view.before = (r->view.before < min_before) ? r->view.before : min_before;
- }
-
- if(r->rows != max_rows) {
- internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- rrdinstance_acquired_id(qi->ria), (size_t)max_rows, rrdmetric_acquired_id(qd->rma), (size_t)r->rows);
-
- r->rows = (r->rows > max_rows) ? r->rows : max_rows;
- }
- }
-
- dimensions_used++;
-
- bool cancel = false;
- if (qt->request.interrupt_callback && qt->request.interrupt_callback(qt->request.interrupt_callback_data)) {
- cancel = true;
- nd_log(NDLS_ACCESS, NDLP_NOTICE, "QUERY INTERRUPTED");
- }
-
- if (qt->request.timeout_ms && ((NETDATA_DOUBLE)(now_ut - qt->timings.received_ut) / 1000.0) > (NETDATA_DOUBLE)qt->request.timeout_ms) {
- cancel = true;
- nd_log(NDLS_ACCESS, NDLP_WARNING, "QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %lld ms)",
- (NETDATA_DOUBLE)(now_ut - qt->timings.received_ut) / 1000.0, (long long)qt->request.timeout_ms);
- }
-
- if(cancel) {
- r->view.flags |= RRDR_RESULT_FLAG_CANCEL;
-
- for(size_t i = d + 1; i < queries_prepared ; i++) {
- if(ops[i]) {
- query_planer_finalize_remaining_plans(ops[i]);
- rrd2rrdr_query_ops_release(ops[i]);
- ops[i] = NULL;
- }
- }
-
- break;
- }
- }
-
- // free all resources used by the grouping method
- r_tmp->time_grouping.free(r_tmp);
-
- // get the final RRDR to send to the caller
- r = rrd2rrdr_group_by_finalize(r_tmp);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if (dimensions_used && !(r->view.flags & RRDR_RESULT_FLAG_CANCEL)) {
- if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- r->internal.log);
-
- if(r->rows != qt->window.points)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "got 'points' is not wanted 'points'");
-
- if(qt->window.aligned && (r->view.before % query_view_update_every(qt)) != 0)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "'before' is not aligned but alignment is required");
-
- // 'after' should not be aligned, since we start inside the first group
- //if(qt->window.aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group, qt->window.after, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
-
- if(r->view.before != qt->window.before)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "chart is not aligned to requested 'before'");
-
- if(r->view.before != qt->window.before)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "got 'before' is not wanted 'before'");
-
- // reported 'after' varies, depending on group
- if(r->view.after != qt->window.after)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before, qt->request.before,
- qt->request.points, qt->window.points, /*after_slot, before_slot,*/
- "got 'after' is not wanted 'after'");
-
- }
-#endif
-
- // free the query pipelining ops
- for(size_t d = 0; d < qt->query.used ; d++) {
- rrd2rrdr_query_ops_release(ops[d]);
- ops[d] = NULL;
- }
- rrd2rrdr_query_ops_freeall(r);
- internal_fatal(released_ops, "QUERY: released_ops should be NULL when the query ends");
-
- onewayalloc_freez(owa, ops);
-
- if(likely(dimensions_used && (qt->window.options & RRDR_OPTION_NONZERO) && !dimensions_nonzero))
- // when all the dimensions are zero, we should return all of them
- qt->window.options &= ~RRDR_OPTION_NONZERO;
-
- qt->timings.executed_ut = now_monotonic_usec();
-
- return r;
-}
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
deleted file mode 100644
index 5eabb6c03..000000000
--- a/web/api/queries/query.h
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_API_DATA_QUERY_H
-#define NETDATA_API_DATA_QUERY_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef enum rrdr_time_grouping {
- RRDR_GROUPING_UNDEFINED = 0,
- RRDR_GROUPING_AVERAGE,
- RRDR_GROUPING_MIN,
- RRDR_GROUPING_MAX,
- RRDR_GROUPING_SUM,
- RRDR_GROUPING_INCREMENTAL_SUM,
- RRDR_GROUPING_TRIMMED_MEAN1,
- RRDR_GROUPING_TRIMMED_MEAN2,
- RRDR_GROUPING_TRIMMED_MEAN3,
- RRDR_GROUPING_TRIMMED_MEAN5,
- RRDR_GROUPING_TRIMMED_MEAN10,
- RRDR_GROUPING_TRIMMED_MEAN15,
- RRDR_GROUPING_TRIMMED_MEAN20,
- RRDR_GROUPING_TRIMMED_MEAN25,
- RRDR_GROUPING_MEDIAN,
- RRDR_GROUPING_TRIMMED_MEDIAN1,
- RRDR_GROUPING_TRIMMED_MEDIAN2,
- RRDR_GROUPING_TRIMMED_MEDIAN3,
- RRDR_GROUPING_TRIMMED_MEDIAN5,
- RRDR_GROUPING_TRIMMED_MEDIAN10,
- RRDR_GROUPING_TRIMMED_MEDIAN15,
- RRDR_GROUPING_TRIMMED_MEDIAN20,
- RRDR_GROUPING_TRIMMED_MEDIAN25,
- RRDR_GROUPING_PERCENTILE25,
- RRDR_GROUPING_PERCENTILE50,
- RRDR_GROUPING_PERCENTILE75,
- RRDR_GROUPING_PERCENTILE80,
- RRDR_GROUPING_PERCENTILE90,
- RRDR_GROUPING_PERCENTILE95,
- RRDR_GROUPING_PERCENTILE97,
- RRDR_GROUPING_PERCENTILE98,
- RRDR_GROUPING_PERCENTILE99,
- RRDR_GROUPING_STDDEV,
- RRDR_GROUPING_CV,
- RRDR_GROUPING_SES,
- RRDR_GROUPING_DES,
- RRDR_GROUPING_COUNTIF,
-} RRDR_TIME_GROUPING;
-
-const char *time_grouping_method2string(RRDR_TIME_GROUPING group);
-void time_grouping_init(void);
-RRDR_TIME_GROUPING time_grouping_parse(const char *name, RRDR_TIME_GROUPING def);
-const char *time_grouping_tostring(RRDR_TIME_GROUPING group);
-
-typedef enum rrdr_group_by {
- RRDR_GROUP_BY_NONE = 0,
- RRDR_GROUP_BY_SELECTED = (1 << 0),
- RRDR_GROUP_BY_DIMENSION = (1 << 1),
- RRDR_GROUP_BY_INSTANCE = (1 << 2),
- RRDR_GROUP_BY_LABEL = (1 << 3),
- RRDR_GROUP_BY_NODE = (1 << 4),
- RRDR_GROUP_BY_CONTEXT = (1 << 5),
- RRDR_GROUP_BY_UNITS = (1 << 6),
- RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE = (1 << 7),
-} RRDR_GROUP_BY;
-
-#define SUPPORTED_GROUP_BY_METHODS (\
- RRDR_GROUP_BY_SELECTED |\
- RRDR_GROUP_BY_DIMENSION |\
- RRDR_GROUP_BY_INSTANCE |\
- RRDR_GROUP_BY_LABEL |\
- RRDR_GROUP_BY_NODE |\
- RRDR_GROUP_BY_CONTEXT |\
- RRDR_GROUP_BY_UNITS |\
- RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE \
-)
-
-struct web_buffer;
-
-RRDR_GROUP_BY group_by_parse(char *s);
-void buffer_json_group_by_to_array(struct web_buffer *wb, RRDR_GROUP_BY group_by);
-
-typedef enum rrdr_group_by_function {
- RRDR_GROUP_BY_FUNCTION_AVERAGE = 0,
- RRDR_GROUP_BY_FUNCTION_MIN,
- RRDR_GROUP_BY_FUNCTION_MAX,
- RRDR_GROUP_BY_FUNCTION_SUM,
- RRDR_GROUP_BY_FUNCTION_PERCENTAGE,
-} RRDR_GROUP_BY_FUNCTION;
-
-RRDR_GROUP_BY_FUNCTION group_by_aggregate_function_parse(const char *s);
-const char *group_by_aggregate_function_to_string(RRDR_GROUP_BY_FUNCTION group_by_function);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif //NETDATA_API_DATA_QUERY_H
diff --git a/web/api/queries/ses/Makefile.am b/web/api/queries/ses/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/ses/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/ses/README.md b/web/api/queries/ses/README.md
deleted file mode 100644
index a06f646ef..000000000
--- a/web/api/queries/ses/README.md
+++ /dev/null
@@ -1,65 +0,0 @@
-<!--
-title: "Single (or Simple) Exponential Smoothing (`ses`)"
-sidebar_label: "Single (or Simple) Exponential Smoothing (`ses`)"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/ses/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Single (or Simple) Exponential Smoothing (`ses`)
-
-> This query is also available as `ema` and `ewma`.
-
-An exponential moving average (`ema`), also known as an exponentially weighted moving average (`ewma`),
-is a first-order infinite impulse response filter that applies exponentially decreasing weighting
-factors: the weight of each older datum shrinks exponentially, never reaching zero.
-
-In simple terms, this is like an average value, but more recent values are given more weight.
-
-Netdata automatically adjusts the weight (`alpha`) based on the number of values processed,
-using the formula:
-
-```
-window = min(number of values, 15)
-alpha = 2 / (window + 1)
-```
-
-You can change the fixed value `15` by setting the following in `netdata.conf`:
-
-```
-[web]
- ses max window = 15
-```
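-
-For illustration, here is a minimal sketch of this adaptive smoothing in C. It is not the
-Netdata implementation and `ses_sketch` is a made-up name; it assumes the window grows with
-the number of values seen, capped at the configured maximum:
-
-```c
-#include <math.h>
-#include <stddef.h>
-
-// Exponentially smooth a series; alpha = 2 / (window + 1), where the window
-// grows with the values processed so far, capped at max_window.
-static double ses_sketch(const double *values, size_t n, size_t max_window) {
-    double level = NAN;
-    for (size_t i = 0; i < n; i++) {
-        size_t window = (i + 1 < max_window) ? i + 1 : max_window;
-        double alpha = 2.0 / ((double)window + 1.0);
-        if (isnan(level))
-            level = values[i];  // seed with the first value
-        else
-            level = alpha * values[i] + (1.0 - alpha) * level;
-    }
-    return level;
-}
-```
-
-With the default `max_window` of 15, the most recent value contributes `alpha = 2/16 = 12.5%`
-once at least 15 points have been processed.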
-
-## How to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: ses -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`ses` does not change the units. For example, if the chart's units are `requests/sec`, the exponential
-moving average is also expressed in `requests/sec`.
-
-It can also be used in APIs and badges as `&group=ses` in the URL.
-
-## Examples
-
-Examining the last minute of `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=single+exponential+smoothing&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Moving_average#exponential-moving-average>
-- <https://en.wikipedia.org/wiki/Exponential_smoothing>
-
-
diff --git a/web/api/queries/stddev/Makefile.am b/web/api/queries/stddev/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/stddev/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/stddev/README.md b/web/api/queries/stddev/README.md
deleted file mode 100644
index 3f751a6e1..000000000
--- a/web/api/queries/stddev/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-<!--
-title: "standard deviation (`stddev`)"
-sidebar_label: "standard deviation (`stddev`)"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/stddev/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Standard deviation (`stddev`)
-
-The standard deviation is a measure of the amount of variation or dispersion
-in a set of data values.
-
-A low standard deviation indicates that the data points tend to be close to the mean (also called the
-expected value) of the set, while a high standard deviation indicates that the data points are spread
-out over a wider range of values.
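-
-As a quick reference, a minimal two-pass sketch of the calculation in C (`stddev_sketch` is an
-illustrative name, not a Netdata function; it computes the population standard deviation):
-
-```c
-#include <math.h>
-#include <stddef.h>
-
-// Two-pass population standard deviation of n values.
-static double stddev_sketch(const double *values, size_t n) {
-    if (n == 0)
-        return NAN;
-    double sum = 0.0;
-    for (size_t i = 0; i < n; i++)
-        sum += values[i];
-    double mean = sum / (double)n;
-    double sq = 0.0;
-    for (size_t i = 0; i < n; i++) {
-        double d = values[i] - mean;
-        sq += d * d;  // accumulate squared deviations from the mean
-    }
-    return sqrt(sq / (double)n);
-}
-```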
-
-## How to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: stddev -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`stddev` does not change the units. For example, if the chart's units are `requests/sec`, the standard
-deviation is also expressed in `requests/sec`.
-
-It can also be used in APIs and badges as `&group=stddev` in the URL.
-
-## Examples
-
-Examining the last minute of `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=stddev&after=-60&label=standard+deviation&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-Check <https://en.wikipedia.org/wiki/Standard_deviation>.
-
----
-
-# Coefficient of variation (`cv`)
-
-> This query is also available as `rsd`.
-
-The coefficient of variation (`cv`), also known as relative standard deviation (`rsd`),
-is a standardized measure of dispersion of a probability distribution or frequency distribution.
-
-It is defined as the ratio of the **standard deviation** to the **mean**.
-
-In simple terms, it expresses the dispersion as a percentage of the mean. So, if the average value
-of a metric is 1000 and its standard deviation is 100 (meaning it typically varies between 900 and 1100),
-then `cv` is 10%.
-
-This is an easy way to check the % variation, without using absolute values.
-
-For example, you may trigger an alert if the `cv` of your web server's requests/sec is above 20 (`%`)
-over the last minute. So, if your web server was serving 1000 reqs/sec on average over the last minute,
-the alert will trigger if it had spikes below 800/sec or above 1200/sec.
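-
-A minimal, self-contained sketch of the calculation in C (`cv_sketch` is an illustrative name,
-not a Netdata function; it returns the coefficient of variation as a percentage):
-
-```c
-#include <math.h>
-#include <stddef.h>
-
-// cv = 100 * stddev / mean, undefined (NAN) for an empty series or a zero mean.
-static double cv_sketch(const double *values, size_t n) {
-    if (n == 0)
-        return NAN;
-    double sum = 0.0, sq = 0.0;
-    for (size_t i = 0; i < n; i++)
-        sum += values[i];
-    double mean = sum / (double)n;
-    if (mean == 0.0)
-        return NAN;
-    for (size_t i = 0; i < n; i++) {
-        double d = values[i] - mean;
-        sq += d * d;
-    }
-    return 100.0 * sqrt(sq / (double)n) / mean;
-}
-```
-
-For the example above (mean 1000, standard deviation 100), this returns 10.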
-
-## How to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: cv -1m unaligned of my_dimension
- units: %
- warn: $this > 20
-```
-
-The unit reported by `cv` is always `%`.
-
-It can also be used in APIs and badges as `&group=cv` in the URL.
-
-## Examples
-
-Examining the last minute of `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=average&after=-60&label=average&value_color=yellow)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=cv&after=-60&label=coefficient+of+variation&value_color=orange&units=pcent)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-Check <https://en.wikipedia.org/wiki/Coefficient_of_variation>.
-
-
diff --git a/web/api/queries/sum/Makefile.am b/web/api/queries/sum/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/sum/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/sum/README.md b/web/api/queries/sum/README.md
deleted file mode 100644
index 62e18acab..000000000
--- a/web/api/queries/sum/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
-<!--
-title: "Sum"
-sidebar_label: "Sum"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/sum/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Sum
-
-This module sums all the values in the time-frame requested.
-
-You can use `sum` to find the volume of something over a period.
-
-## How to use
-
-Use it in alarms like this:
-
-```
- alarm: my_alarm
- on: my_chart
-lookup: sum -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`sum` does not change the units. For example, if the chart's units are `requests/sec`, the result
-is also expressed in `requests/sec`.
-
-It can also be used in APIs and badges as `&group=sum` in the URL.
-
-## Examples
-
-Examining the last minute of `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=1m+sum&value_color=orange&units=requests)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Summation>.
-
-
diff --git a/web/api/queries/trimmed_mean/Makefile.am b/web/api/queries/trimmed_mean/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/web/api/queries/trimmed_mean/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/web/api/queries/trimmed_mean/README.md b/web/api/queries/trimmed_mean/README.md
deleted file mode 100644
index 328c44942..000000000
--- a/web/api/queries/trimmed_mean/README.md
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-title: "Trimmed Mean"
-sidebar_label: "Trimmed Mean"
-description: "Use trimmed-mean in API queries and health entities to find the average value from a sample, eliminating any unwanted spikes in the returned metrics."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/trimmed_mean/README.md
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/Web/Api/Queries"
--->
-
-# Trimmed Mean
-
-The trimmed mean is the average value of a series, excluding the smallest and largest points.
-
-Netdata applies linear interpolation to the last point when the percentage requested to be excluded
-does not yield a whole number of points.
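-
-A minimal sketch of the idea in C, assuming the percentage is excluded from each end of the
-sorted values and the boundary points get fractional weight (the names are illustrative and
-this is not the Netdata implementation):
-
-```c
-#include <math.h>
-#include <stdlib.h>
-
-static int cmp_double(const void *a, const void *b) {
-    double x = *(const double *)a, y = *(const double *)b;
-    return (x > y) - (x < y);
-}
-
-// Trimmed mean, excluding 'percent' % of the points from each end; when the
-// cut does not land on a whole point, the edge points get partial weight.
-static double trimmed_mean_sketch(double *values, size_t n, double percent) {
-    if (n == 0)
-        return NAN;
-    qsort(values, n, sizeof(double), cmp_double);
-    double cut = (percent / 100.0) * (double)n;  // points to drop per side
-    size_t whole = (size_t)cut;
-    double frac = cut - (double)whole;           // fractional part of the cut
-    if (2 * whole >= n)
-        return NAN;                              // nothing left to average
-    double sum = 0.0, weight = 0.0;
-    for (size_t i = whole; i < n - whole; i++) {
-        double w = 1.0;
-        if (i == whole || i == n - whole - 1)
-            w -= frac;                           // linear interpolation at the edges
-        sum += w * values[i];
-        weight += w;
-    }
-    return (weight > 0.0) ? sum / weight : NAN;
-}
-```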
-
-The following trimmed-mean aliases are defined:
-
-- `trimmed-mean1`
-- `trimmed-mean2`
-- `trimmed-mean3`
-- `trimmed-mean5`
-- `trimmed-mean10`
-- `trimmed-mean15`
-- `trimmed-mean20`
-- `trimmed-mean25`
-
-The default `trimmed-mean` is an alias for `trimmed-mean5`.
-Any percentage may be requested using the `group_options` query parameter.
-
-## How to use
-
-Use it in alerts like this:
-
-```
- alarm: my_alert
- on: my_chart
-lookup: trimmed-mean5 -1m unaligned of my_dimension
- warn: $this > 1000
-```
-
-`trimmed-mean` does not change the units. For example, if the chart's units are `requests/sec`, the result
-is also expressed in `requests/sec`.
-
-It can also be used in APIs and badges as `&group=trimmed-mean` in the URL and the additional parameter `group_options`
-may be used to request any percentage (e.g. `&group=trimmed-mean&group_options=29`).
-
-## Examples
-
-Examining the last minute of `successful` web server responses:
-
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=trimmed-mean5&after=-60&label=trimmed-mean5&value_color=orange)
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
-
-## References
-
-- <https://en.wikipedia.org/wiki/Truncated_mean>.
diff --git a/web/api/web_api.h b/web/api/web_api.h
deleted file mode 100644
index a6b3716b7..000000000
--- a/web/api/web_api.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_WEB_API_H
-#define NETDATA_WEB_API_H 1
-
-#include "daemon/common.h"
-#include "web/api/badges/web_buffer_svg.h"
-#include "web/api/ilove/ilove.h"
-#include "web/api/formatters/rrd2json.h"
-#include "web/api/health/health_cmdapi.h"
-#include "web/api/queries/weights.h"
-
-extern bool netdata_is_protected_by_bearer;
-extern DICTIONARY *netdata_authorized_bearers;
-typedef enum __attribute__((packed)) {
- BEARER_STATUS_NO_BEARER_IN_HEADERS,
- BEARER_STATUS_BEARER_DOES_NOT_FIT,
- BEARER_STATUS_NOT_PARSABLE,
- BEARER_STATUS_EXTRACTED_FROM_HEADER,
- BEARER_STATUS_NO_BEARERS_DICTIONARY,
- BEARER_STATUS_NOT_FOUND_IN_DICTIONARY,
- BEARER_STATUS_EXPIRED,
- BEARER_STATUS_AVAILABLE_AND_VALIDATED,
-} BEARER_STATUS;
-
-BEARER_STATUS api_check_bearer_token(struct web_client *w);
-BEARER_STATUS extract_bearer_token_from_request(struct web_client *w, char *dst, size_t dst_len);
-
-struct web_api_command {
- const char *command;
- uint32_t hash;
- WEB_CLIENT_ACL acl;
- int (*callback)(RRDHOST *host, struct web_client *w, char *url);
- unsigned int allow_subpaths;
-};
-
-struct web_client;
-
-int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_path_endpoint, struct web_api_command *api_commands);
-
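-// Sanitize a Google Visualization (tqx) parameter in place: every character
-// that is not alphanumeric, '.', '_' or '-' becomes '_', so the value is safe
-// to echo back in a JSONP response.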
-static inline void fix_google_param(char *s) {
- if(unlikely(!s || !*s)) return;
-
- for( ; *s ;s++) {
- if(!isalnum(*s) && *s != '.' && *s != '_' && *s != '-')
- *s = '_';
- }
-}
-
-int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format, size_t api_version);
-
-bool web_client_interrupt_callback(void *data);
-
-#include "web_api_v1.h"
-#include "web_api_v2.h"
-
-#endif //NETDATA_WEB_API_H
diff --git a/web/api/web_api_v2.c b/web/api/web_api_v2.c
deleted file mode 100644
index 9daf80b9d..000000000
--- a/web/api/web_api_v2.c
+++ /dev/null
@@ -1,884 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "web_api_v2.h"
-#include "../rtc/webrtc.h"
-
-#define BEARER_TOKEN_EXPIRATION 86400
-
-struct bearer_token {
- time_t created_s;
- time_t expires_s;
-};
-
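-// Lazily expire bearer tokens: only every 1000th call walks the dictionary,
-// deleting entries whose 'expires_s' has passed, then garbage-collects it.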
-static void bearer_token_cleanup(void) {
- static time_t attempts = 0;
-
- if(++attempts % 1000 != 0)
- return;
-
- time_t now_s = now_monotonic_sec();
-
- struct bearer_token *z;
- dfe_start_read(netdata_authorized_bearers, z) {
- if(z->expires_s < now_s)
- dictionary_del(netdata_authorized_bearers, z_dfe.name);
- }
- dfe_done(z);
-
- dictionary_garbage_collect(netdata_authorized_bearers);
-}
-
-void bearer_tokens_init(void) {
- netdata_authorized_bearers = dictionary_create_advanced(
- DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
- NULL, sizeof(struct bearer_token));
-}
-
-static time_t bearer_get_token(uuid_t *uuid) {
- char uuid_str[UUID_STR_LEN];
-
- uuid_generate_random(*uuid);
- uuid_unparse_lower(*uuid, uuid_str);
-
- struct bearer_token t = { 0 }, *z;
- z = dictionary_set(netdata_authorized_bearers, uuid_str, &t, sizeof(t));
- if(!z->created_s) {
- z->created_s = now_monotonic_sec();
- z->expires_s = z->created_s + BEARER_TOKEN_EXPIRATION;
- }
-
- bearer_token_cleanup();
-
- return now_realtime_sec() + BEARER_TOKEN_EXPIRATION;
-}
-
-#define HTTP_REQUEST_AUTHORIZATION_BEARER "\r\nAuthorization: Bearer "
-#define HTTP_REQUEST_X_NETDATA_AUTH_BEARER "\r\nX-Netdata-Auth: Bearer "
-
-BEARER_STATUS extract_bearer_token_from_request(struct web_client *w, char *dst, size_t dst_len) {
- const char *req = buffer_tostring(w->response.data);
- size_t req_len = buffer_strlen(w->response.data);
- const char *bearer = NULL;
- const char *bearer_end = NULL;
-
- bearer = strcasestr(req, HTTP_REQUEST_X_NETDATA_AUTH_BEARER);
- if(bearer)
- bearer_end = bearer + sizeof(HTTP_REQUEST_X_NETDATA_AUTH_BEARER) - 1;
- else {
- bearer = strcasestr(req, HTTP_REQUEST_AUTHORIZATION_BEARER);
- if(bearer)
- bearer_end = bearer + sizeof(HTTP_REQUEST_AUTHORIZATION_BEARER) - 1;
- }
-
- if(!bearer || !bearer_end)
- return BEARER_STATUS_NO_BEARER_IN_HEADERS;
-
- const char *token_start = bearer_end;
-
- while(isspace(*token_start))
- token_start++;
-
- const char *token_end = token_start + UUID_STR_LEN - 1 + 2;
- if (token_end > req + req_len)
- return BEARER_STATUS_BEARER_DOES_NOT_FIT;
-
- strncpyz(dst, token_start, dst_len - 1);
- uuid_t uuid;
- if (uuid_parse(dst, uuid) != 0)
- return BEARER_STATUS_NOT_PARSABLE;
-
- return BEARER_STATUS_EXTRACTED_FROM_HEADER;
-}
-
-BEARER_STATUS api_check_bearer_token(struct web_client *w) {
- if(!netdata_authorized_bearers)
- return BEARER_STATUS_NO_BEARERS_DICTIONARY;
-
- char token[UUID_STR_LEN];
- BEARER_STATUS t = extract_bearer_token_from_request(w, token, sizeof(token));
- if(t != BEARER_STATUS_EXTRACTED_FROM_HEADER)
- return t;
-
- struct bearer_token *z = dictionary_get(netdata_authorized_bearers, token);
- if(!z)
- return BEARER_STATUS_NOT_FOUND_IN_DICTIONARY;
-
- if(z->expires_s < now_monotonic_sec())
- return BEARER_STATUS_EXPIRED;
-
- return BEARER_STATUS_AVAILABLE_AND_VALIDATED;
-}
-
-static bool verify_agent_uuids(const char *machine_guid, const char *node_id, const char *claim_id) {
- if(!machine_guid || !node_id || !claim_id)
- return false;
-
- if(strcmp(machine_guid, localhost->machine_guid) != 0)
- return false;
-
- char *agent_claim_id = get_agent_claimid();
-
- bool not_verified = (!agent_claim_id || strcmp(claim_id, agent_claim_id) != 0);
- freez(agent_claim_id);
-
- if(not_verified || !localhost->node_id)
- return false;
-
- char buf[UUID_STR_LEN];
- uuid_unparse_lower(*localhost->node_id, buf);
-
- if(strcmp(node_id, buf) != 0)
- return false;
-
- return true;
-}
-
-int api_v2_bearer_protection(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url) {
- char *machine_guid = NULL;
- char *claim_id = NULL;
- char *node_id = NULL;
- bool protection = netdata_is_protected_by_bearer;
-
- while (url) {
- char *value = strsep_skip_consecutive_separators(&url, "&");
- if (!value || !*value) continue;
-
- char *name = strsep_skip_consecutive_separators(&value, "=");
- if (!name || !*name) continue;
- if (!value || !*value) continue;
-
- if(!strcmp(name, "bearer_protection")) {
- if(!strcmp(value, "on") || !strcmp(value, "true") || !strcmp(value, "yes"))
- protection = true;
- else
- protection = false;
- }
- else if(!strcmp(name, "machine_guid"))
- machine_guid = value;
- else if(!strcmp(name, "claim_id"))
- claim_id = value;
- else if(!strcmp(name, "node_id"))
- node_id = value;
- }
-
- if(!verify_agent_uuids(machine_guid, node_id, claim_id)) {
- buffer_flush(w->response.data);
- buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs");
- return HTTP_RESP_BAD_REQUEST;
- }
-
- netdata_is_protected_by_bearer = protection;
-
- BUFFER *wb = w->response.data;
- buffer_flush(wb);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
- buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer);
- buffer_json_finalize(wb);
-
- return HTTP_RESP_OK;
-}
-
-int api_v2_bearer_token(RRDHOST *host __maybe_unused, struct web_client *w __maybe_unused, char *url __maybe_unused) {
- char *machine_guid = NULL;
- char *claim_id = NULL;
- char *node_id = NULL;
-
- while(url) {
- char *value = strsep_skip_consecutive_separators(&url, "&");
- if (!value || !*value) continue;
-
- char *name = strsep_skip_consecutive_separators(&value, "=");
- if (!name || !*name) continue;
- if (!value || !*value) continue;
-
- if(!strcmp(name, "machine_guid"))
- machine_guid = value;
- else if(!strcmp(name, "claim_id"))
- claim_id = value;
- else if(!strcmp(name, "node_id"))
- node_id = value;
- }
-
- if(!verify_agent_uuids(machine_guid, node_id, claim_id)) {
- buffer_flush(w->response.data);
- buffer_strcat(w->response.data, "The request is missing or not matching local UUIDs");
- return HTTP_RESP_BAD_REQUEST;
- }
-
- uuid_t uuid;
- time_t expires_s = bearer_get_token(&uuid);
-
- BUFFER *wb = w->response.data;
- buffer_flush(wb);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
- buffer_json_member_add_string(wb, "mg", localhost->machine_guid);
- buffer_json_member_add_boolean(wb, "bearer_protection", netdata_is_protected_by_bearer);
- buffer_json_member_add_uuid(wb, "token", &uuid);
- buffer_json_member_add_time_t(wb, "expiration", expires_s);
- buffer_json_finalize(wb);
-
- return HTTP_RESP_OK;
-}
-
-static int web_client_api_request_v2_contexts_internal(RRDHOST *host __maybe_unused, struct web_client *w, char *url, CONTEXTS_V2_MODE mode) {
- struct api_v2_contexts_request req = { 0 };
-
- while(url) {
- char *value = strsep_skip_consecutive_separators(&url, "&");
- if(!value || !*value) continue;
-
- char *name = strsep_skip_consecutive_separators(&value, "=");
- if(!name || !*name) continue;
- if(!value || !*value) continue;
-
- // name and value are now the parameters
- // they are not null and not empty
-
- if(!strcmp(name, "scope_nodes"))
- req.scope_nodes = value;
- else if(!strcmp(name, "nodes"))
- req.nodes = value;
- else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "scope_contexts"))
- req.scope_contexts = value;
- else if((mode & (CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_SEARCH | CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) && !strcmp(name, "contexts"))
- req.contexts = value;
- else if((mode & CONTEXTS_V2_SEARCH) && !strcmp(name, "q"))
- req.q = value;
- else if(!strcmp(name, "options"))
- req.options = web_client_api_request_v2_context_options(value);
- else if(!strcmp(name, "after"))
- req.after = str2l(value);
- else if(!strcmp(name, "before"))
- req.before = str2l(value);
- else if(!strcmp(name, "timeout"))
- req.timeout_ms = str2l(value);
- else if(mode & (CONTEXTS_V2_ALERTS | CONTEXTS_V2_ALERT_TRANSITIONS)) {
- if (!strcmp(name, "alert"))
- req.alerts.alert = value;
- else if (!strcmp(name, "transition"))
- req.alerts.transition = value;
- else if(mode & CONTEXTS_V2_ALERTS) {
- if (!strcmp(name, "status"))
- req.alerts.status = web_client_api_request_v2_alert_status(value);
- }
- else if(mode & CONTEXTS_V2_ALERT_TRANSITIONS) {
- if (!strcmp(name, "last"))
- req.alerts.last = strtoul(value, NULL, 0);
- else if(!strcmp(name, "context"))
- req.contexts = value;
- else if (!strcmp(name, "anchor_gi")) {
- req.alerts.global_id_anchor = str2ull(value, NULL);
- }
- else {
- for(int i = 0; i < ATF_TOTAL_ENTRIES ;i++) {
- if(!strcmp(name, alert_transition_facets[i].query_param))
- req.alerts.facets[i] = value;
- }
- }
- }
- }
- }
-
- if ((mode & CONTEXTS_V2_ALERT_TRANSITIONS) && !req.alerts.last)
- req.alerts.last = 1;
-
- buffer_flush(w->response.data);
- buffer_no_cacheable(w->response.data);
- return rrdcontext_to_json_v2(w->response.data, &req, mode);
-}
-
-static int web_client_api_request_v2_alert_transitions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERT_TRANSITIONS | CONTEXTS_V2_NODES);
-}
-
-static int web_client_api_request_v2_alerts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_ALERTS | CONTEXTS_V2_NODES);
-}
-
-static int web_client_api_request_v2_functions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_FUNCTIONS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_versions(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_q(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_SEARCH | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_contexts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_nodes(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODES_INFO);
-}
-
-static int web_client_api_request_v2_info(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO);
-}
-
-static int web_client_api_request_v2_node_instances(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODE_INSTANCES | CONTEXTS_V2_AGENTS | CONTEXTS_V2_AGENTS_INFO | CONTEXTS_V2_VERSIONS);
-}
-
-static int web_client_api_request_v2_weights(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_VALUE, WEIGHTS_FORMAT_MULTINODE, 2);
-}
-
-static int web_client_api_request_v2_claim(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- return api_v2_claim(w, url);
-}
-
-static int web_client_api_request_v2_alert_config(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- const char *config = NULL;
-
- while(url) {
- char *value = strsep_skip_consecutive_separators(&url, "&");
- if(!value || !*value) continue;
-
- char *name = strsep_skip_consecutive_separators(&value, "=");
- if(!name || !*name) continue;
- if(!value || !*value) continue;
-
- // name and value are now the parameters
- // they are not null and not empty
-
- if(!strcmp(name, "config"))
- config = value;
- }
-
- buffer_flush(w->response.data);
-
- if(!config) {
- w->response.data->content_type = CT_TEXT_PLAIN;
- buffer_strcat(w->response.data, "A config hash ID is required. Add ?config=UUID query param");
- return HTTP_RESP_BAD_REQUEST;
- }
-
- return contexts_v2_alert_config_to_json(w, config);
-}
-
-
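-// Pre-rendered query parameter names ("group_by[N]", "aggregation[N]",
-// "group_by_label[N]") for the multi-pass group-by options, built once at
-// startup so URL parsing can strcmp() against them.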
-#define GROUP_BY_KEY_MAX_LENGTH 30
-static struct {
- char group_by[GROUP_BY_KEY_MAX_LENGTH + 1];
- char aggregation[GROUP_BY_KEY_MAX_LENGTH + 1];
- char group_by_label[GROUP_BY_KEY_MAX_LENGTH + 1];
-} group_by_keys[MAX_QUERY_GROUP_BY_PASSES];
-
-__attribute__((constructor)) void initialize_group_by_keys(void) {
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
- snprintfz(group_by_keys[g].group_by, GROUP_BY_KEY_MAX_LENGTH, "group_by[%zu]", g);
- snprintfz(group_by_keys[g].aggregation, GROUP_BY_KEY_MAX_LENGTH, "aggregation[%zu]", g);
- snprintfz(group_by_keys[g].group_by_label, GROUP_BY_KEY_MAX_LENGTH, "group_by_label[%zu]", g);
- }
-}
-
-static int web_client_api_request_v2_data(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
- usec_t received_ut = now_monotonic_usec();
-
- int ret = HTTP_RESP_BAD_REQUEST;
-
- buffer_flush(w->response.data);
-
- char *google_version = "0.6",
- *google_reqId = "0",
- *google_sig = "0",
- *google_out = "json",
- *responseHandler = NULL,
- *outFileName = NULL;
-
- time_t last_timestamp_in_data = 0, google_timestamp = 0;
-
- char *scope_nodes = NULL;
- char *scope_contexts = NULL;
- char *nodes = NULL;
- char *contexts = NULL;
- char *instances = NULL;
- char *dimensions = NULL;
- char *before_str = NULL;
- char *after_str = NULL;
- char *resampling_time_str = NULL;
- char *points_str = NULL;
- char *timeout_str = NULL;
- char *labels = NULL;
- char *alerts = NULL;
- char *time_group_options = NULL;
- char *tier_str = NULL;
- size_t tier = 0;
- RRDR_TIME_GROUPING time_group = RRDR_GROUPING_AVERAGE;
- DATASOURCE_FORMAT format = DATASOURCE_JSON2;
- RRDR_OPTIONS options = RRDR_OPTION_VIRTUAL_POINTS | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_RETURN_JWAR;
-
- struct group_by_pass group_by[MAX_QUERY_GROUP_BY_PASSES] = {
- {
- .group_by = RRDR_GROUP_BY_DIMENSION,
- .group_by_label = NULL,
- .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE,
- },
- };
-
- size_t group_by_idx = 0, group_by_label_idx = 0, aggregation_idx = 0;
-
- while(url) {
- char *value = strsep_skip_consecutive_separators(&url, "&");
- if(!value || !*value) continue;
-
- char *name = strsep_skip_consecutive_separators(&value, "=");
- if(!name || !*name) continue;
- if(!value || !*value) continue;
-
- // name and value are now the parameters
- // they are not null and not empty
-
- if(!strcmp(name, "scope_nodes")) scope_nodes = value;
- else if(!strcmp(name, "scope_contexts")) scope_contexts = value;
- else if(!strcmp(name, "nodes")) nodes = value;
- else if(!strcmp(name, "contexts")) contexts = value;
- else if(!strcmp(name, "instances")) instances = value;
- else if(!strcmp(name, "dimensions")) dimensions = value;
- else if(!strcmp(name, "labels")) labels = value;
- else if(!strcmp(name, "alerts")) alerts = value;
- else if(!strcmp(name, "after")) after_str = value;
- else if(!strcmp(name, "before")) before_str = value;
- else if(!strcmp(name, "points")) points_str = value;
- else if(!strcmp(name, "timeout")) timeout_str = value;
- else if(!strcmp(name, "group_by")) {
- group_by[group_by_idx++].group_by = group_by_parse(value);
- if(group_by_idx >= MAX_QUERY_GROUP_BY_PASSES)
- group_by_idx = MAX_QUERY_GROUP_BY_PASSES - 1;
- }
- else if(!strcmp(name, "group_by_label")) {
- group_by[group_by_label_idx++].group_by_label = value;
- if(group_by_label_idx >= MAX_QUERY_GROUP_BY_PASSES)
- group_by_label_idx = MAX_QUERY_GROUP_BY_PASSES - 1;
- }
- else if(!strcmp(name, "aggregation")) {
- group_by[aggregation_idx++].aggregation = group_by_aggregate_function_parse(value);
- if(aggregation_idx >= MAX_QUERY_GROUP_BY_PASSES)
- aggregation_idx = MAX_QUERY_GROUP_BY_PASSES - 1;
- }
- else if(!strcmp(name, "format")) format = web_client_api_request_v1_data_format(value);
- else if(!strcmp(name, "options")) options |= web_client_api_request_v1_data_options(value);
- else if(!strcmp(name, "time_group")) time_group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE);
- else if(!strcmp(name, "time_group_options")) time_group_options = value;
- else if(!strcmp(name, "time_resampling")) resampling_time_str = value;
- else if(!strcmp(name, "tier")) tier_str = value;
- else if(!strcmp(name, "callback")) responseHandler = value;
- else if(!strcmp(name, "filename")) outFileName = value;
- else if(!strcmp(name, "tqx")) {
- // parse Google Visualization API options
- // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source
- char *tqx_name, *tqx_value;
-
- while(value) {
- tqx_value = strsep_skip_consecutive_separators(&value, ";");
- if(!tqx_value || !*tqx_value) continue;
-
- tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":");
- if(!tqx_name || !*tqx_name) continue;
- if(!tqx_value || !*tqx_value) continue;
-
- if(!strcmp(tqx_name, "version"))
- google_version = tqx_value;
- else if(!strcmp(tqx_name, "reqId"))
- google_reqId = tqx_value;
- else if(!strcmp(tqx_name, "sig")) {
- google_sig = tqx_value;
- google_timestamp = strtoul(google_sig, NULL, 0);
- }
- else if(!strcmp(tqx_name, "out")) {
- google_out = tqx_value;
- format = web_client_api_request_v1_data_google_format(google_out);
- }
- else if(!strcmp(tqx_name, "responseHandler"))
- responseHandler = tqx_value;
- else if(!strcmp(tqx_name, "outFileName"))
- outFileName = tqx_value;
- }
- }
- else {
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
- if(!strcmp(name, group_by_keys[g].group_by))
- group_by[g].group_by = group_by_parse(value);
- else if(!strcmp(name, group_by_keys[g].group_by_label))
- group_by[g].group_by_label = value;
- else if(!strcmp(name, group_by_keys[g].aggregation))
- group_by[g].aggregation = group_by_aggregate_function_parse(value);
- }
- }
- }
-
- // validate the google parameters given
- fix_google_param(google_out);
- fix_google_param(google_sig);
- fix_google_param(google_reqId);
- fix_google_param(google_version);
- fix_google_param(responseHandler);
- fix_google_param(outFileName);
-
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
- if (group_by[g].group_by_label && *group_by[g].group_by_label)
- group_by[g].group_by |= RRDR_GROUP_BY_LABEL;
- }
-
- if(group_by[0].group_by == RRDR_GROUP_BY_NONE)
- group_by[0].group_by = RRDR_GROUP_BY_DIMENSION;
-
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
- if ((group_by[g].group_by & ~(RRDR_GROUP_BY_DIMENSION)) || (options & RRDR_OPTION_PERCENTAGE)) {
- options |= RRDR_OPTION_ABSOLUTE;
- break;
- }
- }
-
- if(options & RRDR_OPTION_DEBUG)
- options &= ~RRDR_OPTION_MINIFY;
-
- if(tier_str && *tier_str) {
- tier = str2ul(tier_str);
- if(tier < storage_tiers)
- options |= RRDR_OPTION_SELECTED_TIER;
- else
- tier = 0;
- }
-
- time_t before = (before_str && *before_str)?str2l(before_str):0;
- time_t after = (after_str && *after_str) ?str2l(after_str):-600;
- size_t points = (points_str && *points_str)?str2u(points_str):0;
- int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0;
- time_t resampling_time = (resampling_time_str && *resampling_time_str) ? str2l(resampling_time_str) : 0;
-
- QUERY_TARGET_REQUEST qtr = {
- .version = 2,
- .scope_nodes = scope_nodes,
- .scope_contexts = scope_contexts,
- .after = after,
- .before = before,
- .host = NULL,
- .st = NULL,
- .nodes = nodes,
- .contexts = contexts,
- .instances = instances,
- .dimensions = dimensions,
- .alerts = alerts,
- .timeout_ms = timeout,
- .points = points,
- .format = format,
- .options = options,
- .time_group_method = time_group,
- .time_group_options = time_group_options,
- .resampling_time = resampling_time,
- .tier = tier,
- .chart_label_key = NULL,
- .labels = labels,
- .query_source = QUERY_SOURCE_API_DATA,
- .priority = STORAGE_PRIORITY_NORMAL,
- .received_ut = received_ut,
-
- .interrupt_callback = web_client_interrupt_callback,
- .interrupt_callback_data = w,
- };
-
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++)
- qtr.group_by[g] = group_by[g];
-
- QUERY_TARGET *qt = query_target_create(&qtr);
- ONEWAYALLOC *owa = NULL;
-
- if(!qt) {
- buffer_sprintf(w->response.data, "Failed to prepare the query.");
- ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
- goto cleanup;
- }
-
- web_client_timeout_checkpoint_set(w, timeout);
- if(web_client_timeout_checkpoint_and_check(w, NULL)) {
- ret = w->response.code;
- goto cleanup;
- }
-
- if(outFileName && *outFileName) {
- buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName);
- netdata_log_debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName);
- }
-
- if(format == DATASOURCE_DATATABLE_JSONP) {
- if(responseHandler == NULL)
- responseHandler = "google.visualization.Query.setResponse";
-
- netdata_log_debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'",
- w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName
- );
-
- buffer_sprintf(
- w->response.data,
- "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:",
- responseHandler,
- google_version,
- google_reqId,
- (int64_t)now_realtime_sec());
- }
- else if(format == DATASOURCE_JSONP) {
- if(responseHandler == NULL)
- responseHandler = "callback";
-
- buffer_strcat(w->response.data, responseHandler);
- buffer_strcat(w->response.data, "(");
- }
-
- owa = onewayalloc_create(0);
- ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data);
-
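-    // google_timestamp carries the time of the data the client already has; if the
-    // query returned nothing newer, answer 'not_modified' instead of resending the table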
- if(format == DATASOURCE_DATATABLE_JSONP) {
-        if(google_timestamp < last_timestamp_in_data)
-            buffer_strcat(w->response.data, "});");
-        else {
- // the client already has the latest data
- buffer_flush(w->response.data);
- buffer_sprintf(w->response.data,
- "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});",
- responseHandler, google_version, google_reqId);
- }
- }
- else if(format == DATASOURCE_JSONP)
- buffer_strcat(w->response.data, ");");
-
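-    // relative time ranges produce different data on every call; never cache them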
- if(qt->internal.relative)
- buffer_no_cacheable(w->response.data);
- else
- buffer_cacheable(w->response.data);
-
-cleanup:
- query_target_release(qt);
- onewayalloc_destroy(owa);
- return ret;
-}
-
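-// /api/v2/rtc_offer: negotiate a new WebRTC connection from the POSTed offer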
-static int web_client_api_request_v2_webrtc(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) {
- return webrtc_new_connection(w->post_payload, w->response.data);
-}
-
-#define CONFIG_API_V2_URL "/api/v2/config"
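-// serves [/host/<machine_guid>]/api/v2/config/<plugin>[/<module>[/<job_id>]],
-// e.g. GET /api/v2/config/go.d/postgres/mydb (hypothetical plugin/module/job names)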
-static int web_client_api_request_v2_config(RRDHOST *host, struct web_client *w, char *query __maybe_unused) {
-
- char *url = strdupz(buffer_tostring(w->url_as_received));
- char *url_full = url;
-
- buffer_flush(w->response.data);
-
- if (strncmp(url, "/host/", strlen("/host/")) == 0) {
- url += strlen("/host/");
- char *host_id_end = strchr(url, '/');
- if (host_id_end == NULL) {
- buffer_sprintf(w->response.data, "Invalid URL");
- freez(url_full);
- return HTTP_RESP_BAD_REQUEST;
- }
-        // the host id segment is not parsed further here; just skip past it
-        url = host_id_end;
- }
-
- if (strncmp(url, CONFIG_API_V2_URL, strlen(CONFIG_API_V2_URL)) != 0) {
- buffer_sprintf(w->response.data, "Invalid URL");
- freez(url_full);
- return HTTP_RESP_BAD_REQUEST;
- }
- url += strlen(CONFIG_API_V2_URL);
-
- char *save_ptr = NULL;
- char *plugin = strtok_r(url, "/", &save_ptr);
- char *module = strtok_r(NULL, "/", &save_ptr);
- char *job_id = strtok_r(NULL, "/", &save_ptr);
- char *extra = strtok_r(NULL, "/", &save_ptr);
-
- if (extra != NULL) {
- buffer_sprintf(w->response.data, "Invalid URL");
- freez(url_full);
- return HTTP_RESP_BAD_REQUEST;
- }
-
- int http_method;
-    switch (w->mode) {
- case WEB_CLIENT_MODE_GET:
- http_method = HTTP_METHOD_GET;
- break;
- case WEB_CLIENT_MODE_POST:
- http_method = HTTP_METHOD_POST;
- break;
- case WEB_CLIENT_MODE_PUT:
- http_method = HTTP_METHOD_PUT;
- break;
- case WEB_CLIENT_MODE_DELETE:
- http_method = HTTP_METHOD_DELETE;
- break;
- default:
- buffer_sprintf(w->response.data, "Invalid HTTP method");
- freez(url_full);
- return HTTP_RESP_BAD_REQUEST;
- }
-
- struct uni_http_response resp = dyn_conf_process_http_request(host->configurable_plugins, http_method, plugin, module, job_id, w->post_payload, w->post_payload_size);
-    // make sure the content is NUL-terminated before it is handed to buffer_strcat()
-    if (resp.content_length == 0 || resp.content[resp.content_length - 1] != '\0') {
- char *con = mallocz(resp.content_length + 1);
- memcpy(con, resp.content, resp.content_length);
- con[resp.content_length] = '\0';
- if (resp.content_free)
- resp.content_free(resp.content);
- resp.content = con;
- resp.content_free = freez_dyncfg;
- }
- buffer_strcat(w->response.data, resp.content);
- if (resp.content_free)
- resp.content_free(resp.content);
- w->response.data->content_type = resp.content_type;
- freez(url_full);
- return resp.status;
-}
-
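-// nested layout: hosts -> plugins -> modules -> jobs, i.e.
-// { "hosts": [ { "host_guid": "...", "plugins": [ { "name": "...",
-//   "modules": [ { "name": "...", "jobs": [ { "name": "...", "job": { ... } } ] } ] } ] } ] }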
-static json_object *job_statuses_grouped(void) {
-    json_object *top_obj = json_object_new_object();
-    json_object *host_vec = json_object_new_array();
-
- RRDHOST *host;
-
- dfe_start_reentrant(rrdhost_root_index, host) {
- json_object *host_obj = json_object_new_object();
- json_object *host_sub_obj = json_object_new_string(host->machine_guid);
- json_object_object_add(host_obj, "host_guid", host_sub_obj);
- host_sub_obj = json_object_new_array();
-
- DICTIONARY *plugins_dict = host->configurable_plugins;
-
- struct configurable_plugin *plugin;
- dfe_start_read(plugins_dict, plugin) {
- json_object *plugin_obj = json_object_new_object();
- json_object *plugin_sub_obj = json_object_new_string(plugin->name);
- json_object_object_add(plugin_obj, "name", plugin_sub_obj);
- plugin_sub_obj = json_object_new_array();
-
- struct module *module;
- dfe_start_read(plugin->modules, module) {
- json_object *module_obj = json_object_new_object();
- json_object *module_sub_obj = json_object_new_string(module->name);
- json_object_object_add(module_obj, "name", module_sub_obj);
- module_sub_obj = json_object_new_array();
-
- struct job *job;
- dfe_start_read(module->jobs, job) {
- json_object *job_obj = json_object_new_object();
- json_object *job_sub_obj = json_object_new_string(job->name);
- json_object_object_add(job_obj, "name", job_sub_obj);
- job_sub_obj = job2json(job);
- json_object_object_add(job_obj, "job", job_sub_obj);
- json_object_array_add(module_sub_obj, job_obj);
- } dfe_done(job);
- json_object_object_add(module_obj, "jobs", module_sub_obj);
- json_object_array_add(plugin_sub_obj, module_obj);
- } dfe_done(module);
- json_object_object_add(plugin_obj, "modules", plugin_sub_obj);
- json_object_array_add(host_sub_obj, plugin_obj);
- } dfe_done(plugin);
- json_object_object_add(host_obj, "plugins", host_sub_obj);
- json_object_array_add(host_vec, host_obj);
- }
- dfe_done(host);
-
- json_object_object_add(top_obj, "hosts", host_vec);
- return top_obj;
-}
-
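-// flat layout: one array element per job, each tagged with its
-// host_guid, plugin_name and module_name next to the job object itself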
-static json_object *job_statuses_flat(void) {
- RRDHOST *host;
-
- json_object *ret = json_object_new_array();
-
- dfe_start_reentrant(rrdhost_root_index, host) {
- DICTIONARY *plugins_dict = host->configurable_plugins;
-
- struct configurable_plugin *plugin;
- dfe_start_read(plugins_dict, plugin) {
- struct module *module;
- dfe_start_read(plugin->modules, module) {
- struct job *job;
- dfe_start_read(module->jobs, job) {
- json_object *job_rich = json_object_new_object();
- json_object *obj = json_object_new_string(host->machine_guid);
- json_object_object_add(job_rich, "host_guid", obj);
- obj = json_object_new_string(plugin->name);
- json_object_object_add(job_rich, "plugin_name", obj);
- obj = json_object_new_string(module->name);
- json_object_object_add(job_rich, "module_name", obj);
- obj = job2json(job);
- json_object_object_add(job_rich, "job", obj);
- json_object_array_add(ret, job_rich);
- } dfe_done(job);
- } dfe_done(module);
- } dfe_done(plugin);
- }
- dfe_done(host);
-
- return ret;
-}
-
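-// /api/v2/job_statuses[?grouped]: dump all dyncfg job states as pretty-printed JSON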
-static int web_client_api_request_v2_job_statuses(RRDHOST *host __maybe_unused, struct web_client *w, char *query) {
- json_object *json;
-    // any query string containing "grouped" selects the nested layout
-    if (query && strstr(query, "grouped") != NULL)
-        json = job_statuses_grouped();
-    else
-        json = job_statuses_flat();
-
- buffer_flush(w->response.data);
-    buffer_strcat(w->response.data, json_object_to_json_string_ext(json, JSON_C_TO_STRING_PRETTY));
-    json_object_put(json); // buffer_strcat() copied the string; drop our reference to free the tree
- w->response.data->content_type = CT_APPLICATION_JSON;
- return HTTP_RESP_OK;
-}
-
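-// fields: command, hash (computed on first request), required ACL, callback, and a
-// flag that appears to permit extra path segments after the command (only "config" sets it)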
-static struct web_api_command api_commands_v2[] = {
- {"info", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_info, 0},
-
- {"data", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_data, 0},
- {"weights", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_weights, 0},
-
- {"contexts", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_contexts, 0},
- {"nodes", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_nodes, 0},
- {"node_instances", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_node_instances, 0},
- {"versions", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_versions, 0},
- {"functions", 0, WEB_CLIENT_ACL_ACLK_WEBRTC_DASHBOARD_WITH_BEARER | ACL_DEV_OPEN_ACCESS, web_client_api_request_v2_functions, 0},
- {"q", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_q, 0},
- {"alerts", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_alerts, 0},
-
- {"alert_transitions", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_alert_transitions, 0},
- {"alert_config", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_alert_config, 0},
-
- {"claim", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v2_claim, 0},
-
- {"rtc_offer", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, web_client_api_request_v2_webrtc, 0},
- {"bearer_protection", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, api_v2_bearer_protection, 0},
- {"bearer_get_token", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, api_v2_bearer_token, 0},
-
- {"config", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_config, 1},
- {"job_statuses", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_job_statuses, 0},
-
- { "ilove.svg", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v2_ilove, 0 },
-
- // terminator
- {NULL, 0, WEB_CLIENT_ACL_NONE, NULL, 0},
-};
-
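-// dispatcher for all /api/v2 endpoints; command hashes are computed once, on the first request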
-inline int web_client_api_request_v2(RRDHOST *host, struct web_client *w, char *url_path_endpoint) {
- static int initialized = 0;
-
- if(unlikely(initialized == 0)) {
- initialized = 1;
-
- for(int i = 0; api_commands_v2[i].command ; i++)
- api_commands_v2[i].hash = simple_hash(api_commands_v2[i].command);
- }
-
- return web_client_api_request_vX(host, w, url_path_endpoint, api_commands_v2);
-}