author    Daniel Baumann <daniel.baumann@progress-linux.org>  2023-05-08 16:27:04 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2023-05-08 16:27:04 +0000
commit    a836a244a3d2bdd4da1ee2641e3e957850668cea (patch)
tree      cb87c75b3677fab7144f868435243f864048a1e6 /web/api
parent    Adding upstream version 1.38.1. (diff)
Adding upstream version 1.39.0. (tag: upstream/1.39.0)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'web/api')
-rw-r--r--  web/api/README.md | 20
-rw-r--r--  web/api/badges/README.md | 4
-rw-r--r--  web/api/badges/web_buffer_svg.c | 8
-rw-r--r--  web/api/exporters/README.md | 4
-rw-r--r--  web/api/exporters/allmetrics.c | 14
-rw-r--r--  web/api/exporters/prometheus/README.md | 4
-rw-r--r--  web/api/exporters/shell/README.md | 8
-rw-r--r--  web/api/exporters/shell/allmetrics_shell.c | 8
-rw-r--r--  web/api/formatters/README.md | 4
-rw-r--r--  web/api/formatters/charts2json.c | 52
-rw-r--r--  web/api/formatters/charts2json.h | 1
-rw-r--r--  web/api/formatters/csv/README.md | 4
-rw-r--r--  web/api/formatters/csv/csv.c | 61
-rw-r--r--  web/api/formatters/json/README.md | 4
-rw-r--r--  web/api/formatters/json/json.c | 170
-rw-r--r--  web/api/formatters/json/json.h | 1
-rw-r--r--  web/api/formatters/json_wrapper.c | 1806
-rw-r--r--  web/api/formatters/json_wrapper.h | 14
-rw-r--r--  web/api/formatters/rrd2json.c | 231
-rw-r--r--  web/api/formatters/rrd2json.h | 53
-rw-r--r--  web/api/formatters/rrdset2json.c | 8
-rw-r--r--  web/api/formatters/ssv/README.md | 4
-rw-r--r--  web/api/formatters/ssv/ssv.c | 10
-rw-r--r--  web/api/formatters/value/README.md | 4
-rw-r--r--  web/api/formatters/value/value.c | 86
-rw-r--r--  web/api/formatters/value/value.h | 4
-rw-r--r--  web/api/health/README.md | 4
-rw-r--r--  web/api/health/health_cmdapi.c | 8
-rw-r--r--  web/api/netdata-swagger.json | 3713
-rw-r--r--  web/api/netdata-swagger.yaml | 3125
-rw-r--r--  web/api/queries/README.md | 41
-rw-r--r--  web/api/queries/average/README.md | 4
-rw-r--r--  web/api/queries/average/average.c | 55
-rw-r--r--  web/api/queries/average/average.h | 57
-rw-r--r--  web/api/queries/countif/README.md | 4
-rw-r--r--  web/api/queries/countif/countif.c | 129
-rw-r--r--  web/api/queries/countif/countif.h | 143
-rw-r--r--  web/api/queries/des/README.md | 4
-rw-r--r--  web/api/queries/des/des.c | 129
-rw-r--r--  web/api/queries/des/des.h | 133
-rw-r--r--  web/api/queries/incremental_sum/README.md | 4
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.c | 59
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.h | 64
-rw-r--r--  web/api/queries/max/README.md | 4
-rw-r--r--  web/api/queries/max/max.c | 50
-rw-r--r--  web/api/queries/max/max.h | 54
-rw-r--r--  web/api/queries/median/README.md | 4
-rw-r--r--  web/api/queries/median/median.c | 134
-rw-r--r--  web/api/queries/median/median.h | 146
-rw-r--r--  web/api/queries/min/README.md | 4
-rw-r--r--  web/api/queries/min/min.c | 50
-rw-r--r--  web/api/queries/min/min.h | 54
-rw-r--r--  web/api/queries/percentile/README.md | 4
-rw-r--r--  web/api/queries/percentile/percentile.c | 163
-rw-r--r--  web/api/queries/percentile/percentile.h | 175
-rw-r--r--  web/api/queries/query.c | 2570
-rw-r--r--  web/api/queries/query.h | 54
-rw-r--r--  web/api/queries/rrdr.c | 77
-rw-r--r--  web/api/queries/rrdr.h | 189
-rw-r--r--  web/api/queries/ses/README.md | 4
-rw-r--r--  web/api/queries/ses/ses.c | 82
-rw-r--r--  web/api/queries/ses/ses.h | 87
-rw-r--r--  web/api/queries/stddev/README.md | 4
-rw-r--r--  web/api/queries/stddev/stddev.c | 116
-rw-r--r--  web/api/queries/stddev/stddev.h | 118
-rw-r--r--  web/api/queries/sum/README.md | 4
-rw-r--r--  web/api/queries/sum/sum.c | 46
-rw-r--r--  web/api/queries/sum/sum.h | 51
-rw-r--r--  web/api/queries/trimmed_mean/README.md | 4
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.c | 159
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.h | 171
-rw-r--r--  web/api/queries/weights.c | 1549
-rw-r--r--  web/api/queries/weights.h | 45
-rw-r--r--  web/api/web_api.c | 216
-rw-r--r--  web/api/web_api.h | 39
-rw-r--r--  web/api/web_api_v1.c | 764
-rw-r--r--  web/api/web_api_v1.h | 17
-rw-r--r--  web/api/web_api_v2.c | 372
-rw-r--r--  web/api/web_api_v2.h | 12
79 files changed, 11848 insertions(+), 6007 deletions(-)
diff --git a/web/api/README.md b/web/api/README.md
index 82a55eb25..237394a88 100644
--- a/web/api/README.md
+++ b/web/api/README.md
@@ -1,16 +1,12 @@
-<!--
-title: "API"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/web/api/README.md"
-sidebar_label: "API"
-learn_status: "Published"
-learn_topic_type: "Tasks"
-learn_rel_path: "Operations"
--->
-
# API
-## Netdata REST API
+## Netdata agent REST API
+
+The Netdata agent's REST API is fully documented in OpenAPI format [in our GitHub repository](https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml).
+
+You can explore it using the **[Swagger UI](https://learn.netdata.cloud/api)** or the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
-The complete documentation of the Netdata API is available as a Swagger API document [in our GitHub repository](https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml). You can view it online using the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
+## Netdata cloud API
-If your prefer it over the Swagger Editor, you can also use [Swagger UI](https://github.com/swagger-api/swagger-ui) by pointing at `web/api/netdata-swagger.yaml` in the Netdata source tree (or at https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml if you want to use the Swagger API definitions directly from our GitHub repository). This however does not provide all the information available.
+A very basic Netdata cloud REST API supports the [Grafana data source plugin](https://github.com/netdata/netdata-grafana-datasource-plugin/blob/master/README.md),
+but has not yet been expanded for wider use. We intend to provide a properly documented API in the future.
diff --git a/web/api/badges/README.md b/web/api/badges/README.md
index 8f6eca62a..e40e706eb 100644
--- a/web/api/badges/README.md
+++ b/web/api/badges/README.md
@@ -1,6 +1,10 @@
<!--
title: "Netdata badges"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/badges/README.md
+sidebar_label: "Netdata badges"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api"
-->
# Netdata badges
diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c
index ca0f4b7a0..b69f35afa 100644
--- a/web/api/badges/web_buffer_svg.c
+++ b/web/api/badges/web_buffer_svg.c
@@ -767,7 +767,7 @@ void buffer_svg(BUFFER *wb, const char *label,
label_color_parsed = parse_color_argument(label_color, "555");
value_color_parsed = parse_color_argument(value_color_buffer, "555");
- wb->contenttype = CT_IMAGE_SVG_XML;
+ wb->content_type = CT_IMAGE_SVG_XML;
total_width = total_width * scale / 100.0;
height = height * scale / 100.0;
@@ -898,10 +898,10 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
RRDSET *st = NULL;
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if(!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -923,7 +923,7 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
else if(!strcmp(name, "points")) points_str = value;
else if(!strcmp(name, "group_options")) group_options = value;
else if(!strcmp(name, "group")) {
- group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+ group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE);
}
else if(!strcmp(name, "options")) {
options |= web_client_api_request_v1_data_options(value);
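
(For context on the `mystrsep()` → `strsep_skip_consecutive_separators()` rename used throughout this release: the behavior below is a minimal sketch inferred from the call sites in these hunks, not the libnetdata implementation itself.)

```c
#include <string.h>

// Sketch: a strsep() variant that swallows runs of consecutive separators,
// so parsing "a=1&&b=2" yields "a=1" then "b=2" and never an empty token.
// strsep() is POSIX/BSD; on glibc it needs _DEFAULT_SOURCE.
static char *strsep_skip_consecutive_separators_sketch(char **ptr, const char *seps) {
    char *tok = (char *)"";
    while (tok && !tok[0] && *ptr)   // keep pulling while the token is empty
        tok = strsep(ptr, seps);
    return tok;                      // first non-empty token, or ""/NULL at end
}
```

The query-string loops above then split on `&` and each pair on `=` without tripping over empty fields, which is why the `if(!value || !*value) continue;` guards stay unchanged.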
diff --git a/web/api/exporters/README.md b/web/api/exporters/README.md
index 1d517a91e..4be567691 100644
--- a/web/api/exporters/README.md
+++ b/web/api/exporters/README.md
@@ -1,6 +1,10 @@
<!--
title: "Exporters"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/README.md
+sidebar_label: "Exporters"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api"
-->
# Exporters
diff --git a/web/api/exporters/allmetrics.c b/web/api/exporters/allmetrics.c
index 88065400d..cad52a7d5 100644
--- a/web/api/exporters/allmetrics.c
+++ b/web/api/exporters/allmetrics.c
@@ -39,10 +39,10 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
prometheus_prefix = global_exporting_prefix;
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if (!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -90,17 +90,17 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
switch(format) {
case ALLMETRICS_JSON:
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
rrd_stats_api_v1_charts_allmetrics_json(host, filter, w->response.data);
return HTTP_RESP_OK;
case ALLMETRICS_SHELL:
- w->response.data->contenttype = CT_TEXT_PLAIN;
+ w->response.data->content_type = CT_TEXT_PLAIN;
rrd_stats_api_v1_charts_allmetrics_shell(host, filter, w->response.data);
return HTTP_RESP_OK;
case ALLMETRICS_PROMETHEUS:
- w->response.data->contenttype = CT_PROMETHEUS;
+ w->response.data->content_type = CT_PROMETHEUS;
rrd_stats_api_v1_charts_allmetrics_prometheus_single_host(
host
, filter
@@ -113,7 +113,7 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
return HTTP_RESP_OK;
case ALLMETRICS_PROMETHEUS_ALL_HOSTS:
- w->response.data->contenttype = CT_PROMETHEUS;
+ w->response.data->content_type = CT_PROMETHEUS;
rrd_stats_api_v1_charts_allmetrics_prometheus_all_hosts(
host
, filter
@@ -126,7 +126,7 @@ inline int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client
return HTTP_RESP_OK;
default:
- w->response.data->contenttype = CT_TEXT_PLAIN;
+ w->response.data->content_type = CT_TEXT_PLAIN;
buffer_strcat(w->response.data, "Which format? '" ALLMETRICS_FORMAT_SHELL "', '" ALLMETRICS_FORMAT_PROMETHEUS "', '" ALLMETRICS_FORMAT_PROMETHEUS_ALL_HOSTS "' and '" ALLMETRICS_FORMAT_JSON "' are currently supported.");
return HTTP_RESP_BAD_REQUEST;
}
diff --git a/web/api/exporters/prometheus/README.md b/web/api/exporters/prometheus/README.md
index 1ff86f4e0..5e0f98c16 100644
--- a/web/api/exporters/prometheus/README.md
+++ b/web/api/exporters/prometheus/README.md
@@ -1,6 +1,10 @@
<!--
title: "Prometheus exporter"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/prometheus/README.md
+sidebar_label: "Prometheus exporter"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Exporters"
-->
# Prometheus exporter
diff --git a/web/api/exporters/shell/README.md b/web/api/exporters/shell/README.md
index a41326c9d..a17ff1148 100644
--- a/web/api/exporters/shell/README.md
+++ b/web/api/exporters/shell/README.md
@@ -1,9 +1,13 @@
<!--
-title: "shell exporter"
+title: "Shell exporter"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/exporters/shell/README.md
+sidebar_label: "Shell exporter"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Exporters"
-->
-# shell exporter
+# Shell exporter
Shell scripts can now query Netdata:
diff --git a/web/api/exporters/shell/allmetrics_shell.c b/web/api/exporters/shell/allmetrics_shell.c
index dded5a536..fbfd6b574 100644
--- a/web/api/exporters/shell/allmetrics_shell.c
+++ b/web/api/exporters/shell/allmetrics_shell.c
@@ -24,12 +24,12 @@ static inline size_t shell_name_copy(char *d, const char *s, size_t usable) {
void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb) {
analytics_log_shell();
- SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT);
+ SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true);
// for each chart
RRDSET *st;
rrdset_foreach_read(st, host) {
- if (filter && !simple_pattern_matches(filter, rrdset_name(st)))
+ if (filter && !simple_pattern_matches_string(filter, st->name))
continue;
NETDATA_DOUBLE total = 0.0;
@@ -97,7 +97,7 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_
void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb) {
analytics_log_json();
- SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT);
+ SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT, true);
buffer_strcat(wb, "{");
@@ -107,7 +107,7 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_s
// for each chart
RRDSET *st;
rrdset_foreach_read(st, host) {
- if (filter && !(simple_pattern_matches(filter, rrdset_id(st)) || simple_pattern_matches(filter, rrdset_name(st))))
+ if (filter && !(simple_pattern_matches_string(filter, st->id) || simple_pattern_matches_string(filter, st->name)))
continue;
if(rrdset_is_available_for_viewers(st)) {
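
(`simple_pattern_create()` gains a fourth argument in this release, and matching moves to a `STRING`-aware variant. A usage sketch, assuming the new boolean selects case-sensitive matching; the argument's real name is not visible in this diff.)

```c
#include "libnetdata/libnetdata.h"   // assumed header for the pattern API

void filter_sketch(void) {
    // trailing 'true' assumed to request case-sensitive matching
    SIMPLE_PATTERN *filter =
        simple_pattern_create("system.*|apps.*", NULL, SIMPLE_PATTERN_EXACT, true);

    if (simple_pattern_matches(filter, "system.cpu")) {
        // chart would be included in the export
    }

    simple_pattern_free(filter);
}
```

`simple_pattern_matches_string(filter, st->name)` as used above also avoids a `string2str()` round-trip by matching netdata's interned `STRING` type directly.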
diff --git a/web/api/formatters/README.md b/web/api/formatters/README.md
index 4c281f064..ddc70d90f 100644
--- a/web/api/formatters/README.md
+++ b/web/api/formatters/README.md
@@ -1,6 +1,10 @@
<!--
title: "Query formatting"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/README.md
+sidebar_label: "Query formatting"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Formatters"
-->
# Query formatting
diff --git a/web/api/formatters/charts2json.c b/web/api/formatters/charts2json.c
index 61a9ecf2f..4b6b095c2 100644
--- a/web/api/formatters/charts2json.c
+++ b/web/api/formatters/charts2json.c
@@ -137,55 +137,3 @@ void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived
buffer_sprintf(wb, "\n\t]\n}\n");
}
-
-// generate collectors list for the api/v1/info call
-
-struct collector {
- const char *plugin;
- const char *module;
-};
-
-struct array_printer {
- int c;
- BUFFER *wb;
-};
-
-static int print_collector_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) {
- struct array_printer *ap = (struct array_printer *)data;
- BUFFER *wb = ap->wb;
- struct collector *col=(struct collector *) entry;
- if(ap->c) buffer_strcat(wb, ",");
- buffer_strcat(wb, "\n\t\t{\n\t\t\t\"plugin\": \"");
- buffer_strcat(wb, col->plugin);
- buffer_strcat(wb, "\",\n\t\t\t\"module\": \"");
- buffer_strcat(wb, col->module);
- buffer_strcat(wb, "\"\n\t\t}");
- (ap->c)++;
- return 0;
-}
-
-void chartcollectors2json(RRDHOST *host, BUFFER *wb) {
- DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED);
- RRDSET *st;
- char name[500];
-
- time_t now = now_realtime_sec();
- rrdset_foreach_read(st, host) {
- if (rrdset_is_available_for_viewers(st)) {
- struct collector col = {
- .plugin = rrdset_plugin_name(st),
- .module = rrdset_module_name(st)
- };
- sprintf(name, "%s:%s", col.plugin, col.module);
- dictionary_set(dict, name, &col, sizeof(struct collector));
- st->last_accessed_time_s = now;
- }
- }
- rrdset_foreach_done(st);
- struct array_printer ap = {
- .c = 0,
- .wb = wb
- };
- dictionary_walkthrough_read(dict, print_collector_callback, &ap);
- dictionary_destroy(dict);
-}
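
(The removed `chartcollectors2json()` used netdata's dictionary as a dedup set, an idiom the new `json_wrapper.c` code later in this patch still leans on. A compressed sketch of the pattern, using only API calls visible in this diff and the `struct collector` defined in the removed code above:)

```c
// Deduplicate plugin:module pairs under a composite key, then walk once.
DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED);

struct collector col = { .plugin = "proc.plugin", .module = "/proc/stat" };
char key[500];
snprintfz(key, sizeof(key) - 1, "%s:%s", col.plugin, col.module);
dictionary_set(dict, key, &col, sizeof(col));   // repeated keys overwrite

// ... dictionary_walkthrough_read(dict, callback, &state); ...
dictionary_destroy(dict);
```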
diff --git a/web/api/formatters/charts2json.h b/web/api/formatters/charts2json.h
index d4b04af58..96720d4b4 100644
--- a/web/api/formatters/charts2json.h
+++ b/web/api/formatters/charts2json.h
@@ -6,7 +6,6 @@
#include "rrd2json.h"
void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived);
-void chartcollectors2json(RRDHOST *host, BUFFER *wb);
const char* get_release_channel();
#endif //NETDATA_API_FORMATTER_CHARTS2JSON_H
diff --git a/web/api/formatters/csv/README.md b/web/api/formatters/csv/README.md
index fc5ffec1b..4585710b4 100644
--- a/web/api/formatters/csv/README.md
+++ b/web/api/formatters/csv/README.md
@@ -1,6 +1,10 @@
<!--
title: "CSV formatter"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/csv/README.md
+sidebar_label: "CSV formatter"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Formatters"
-->
# CSV formatter
diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c
index 18009f146..8f4950ddd 100644
--- a/web/api/formatters/csv/csv.c
+++ b/web/api/formatters/csv/csv.c
@@ -5,15 +5,13 @@
void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines) {
//info("RRD2CSV(): %s: BEGIN", r->st->id);
- QUERY_TARGET *qt = r->internal.qt;
long c, i;
- const long used = qt->query.used;
+ const long used = (long)r->d;
// print the csv header
for(c = 0, i = 0; c < used ; c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
if(!i) {
buffer_strcat(wb, startline);
@@ -23,7 +21,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
}
buffer_strcat(wb, separator);
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
- buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
+ buffer_strcat(wb, string2str(r->dn[c]));
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
i++;
}
@@ -32,9 +30,8 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
if(format == DATASOURCE_CSV_MARKDOWN) {
// print the --- line after header
for(c = 0, i = 0; c < used ;c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
if(!i) {
buffer_strcat(wb, startline);
@@ -64,7 +61,6 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
}
// for each line in the array
- NETDATA_DOUBLE total = 1;
for(i = start; i != end ;i += step) {
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
@@ -76,7 +72,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) {
// print the timestamp of the line
- buffer_rrd_value(wb, (NETDATA_DOUBLE)now);
+ buffer_print_netdata_double(wb, (NETDATA_DOUBLE) now);
// in ms
if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000");
}
@@ -87,29 +83,10 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
buffer_date(wb, tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec);
}
- int set_min_max = 0;
- if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
- total = 0;
- for(c = 0; c < used ;c++) {
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
-
- NETDATA_DOUBLE n = cn[c];
-
- if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
-
- total += n;
- }
- // prevent a division by zero
- if(total == 0) total = 1;
- set_min_max = 1;
- }
-
// for each dimension
for(c = 0; c < used ;c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
buffer_strcat(wb, separator);
@@ -121,24 +98,8 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
else
buffer_strcat(wb, "null");
}
- else {
- if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
-
- if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
- n = n * 100 / total;
-
- if(unlikely(set_min_max)) {
- r->min = r->max = n;
- set_min_max = 0;
- }
-
- if(n < r->min) r->min = n;
- if(n > r->max) r->max = n;
- }
-
- buffer_rrd_value(wb, n);
- }
+ else
+ buffer_print_netdata_double(wb, n);
}
buffer_strcat(wb, endline);
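
(The three per-dimension visibility checks that kept repeating in the formatters are consolidated into `rrdr_dimension_should_be_exposed()`. Its body is not part of this diff; the sketch below is inferred from the conditions it replaces, and the real helper may honor additional options.)

```c
static inline bool rrdr_dimension_should_be_exposed(RRDR_DIMENSION_FLAGS od, RRDR_OPTIONS options) {
    if (od & RRDR_DIMENSION_HIDDEN) return false;      // explicitly hidden
    if (!(od & RRDR_DIMENSION_QUERIED)) return false;  // not part of this query
    if ((options & RRDR_OPTION_NONZERO) &&
        !(od & RRDR_DIMENSION_NONZERO)) return false;  // all-zero, filtered out
    return true;
}
```

Note that the per-row `RRDR_OPTION_ABSOLUTE`/`RRDR_OPTION_PERCENTAGE` post-processing also disappears from this formatter, presumably handled before the RRDR reaches the output stage.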
diff --git a/web/api/formatters/json/README.md b/web/api/formatters/json/README.md
index 75f729ada..bc70aec02 100644
--- a/web/api/formatters/json/README.md
+++ b/web/api/formatters/json/README.md
@@ -1,6 +1,10 @@
<!--
title: "JSON formatter"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/json/README.md
+sidebar_label: "JSON formatter"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Formatters"
-->
# JSON formatter
diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c
index 3cad3e914..d5b8c7570 100644
--- a/web/api/formatters/json/json.c
+++ b/web/api/formatters/json/json.c
@@ -42,7 +42,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
strcpy(post_value, "}");
strcpy(post_line, "]}");
snprintfz(data_begin, 100, "\n ],\n %srows%s:\n [\n", kq, kq);
- strcpy(finish, "\n]\n}");
+ strcpy(finish, "\n ]\n }");
snprintfz(overflow_annotation, 200, ",{%sv%s:%sRESET OR OVERFLOW%s},{%sv%s:%sThe counters have been wrapped.%s}", kq, kq, sq, sq, kq, kq, sq, sq);
snprintfz(normal_annotation, 200, ",{%sv%s:null},{%sv%s:null}", kq, kq, kq, kq);
@@ -69,9 +69,9 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
dates_with_new = 0;
}
if( options & RRDR_OPTION_OBJECTSROWS )
- strcpy(pre_date, " { ");
+ strcpy(pre_date, " {");
else
- strcpy(pre_date, " [ ");
+ strcpy(pre_date, " [");
strcpy(pre_label, ",\"");
strcpy(post_label, "\"");
strcpy(pre_value, ",");
@@ -79,10 +79,10 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
strcpy(post_line, "}");
else
strcpy(post_line, "]");
- snprintfz(data_begin, 100, "],\n %sdata%s:\n [\n", kq, kq);
- strcpy(finish, "\n]\n}");
+ snprintfz(data_begin, 100, "],\n %sdata%s:[\n", kq, kq);
+ strcpy(finish, "\n ]\n }");
- buffer_sprintf(wb, "{\n %slabels%s: [", kq, kq);
+ buffer_sprintf(wb, "{\n %slabels%s:[", kq, kq);
buffer_sprintf(wb, "%stime%s", sq, sq);
if( options & RRDR_OPTION_OBJECTSROWS )
@@ -104,18 +104,16 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
// -------------------------------------------------------------------------
// print the JSON header
- QUERY_TARGET *qt = r->internal.qt;
long c, i;
- const long used = qt->query.used;
+ const long used = (long)r->d;
// print the header lines
for(c = 0, i = 0; c < used ; c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
buffer_fast_strcat(wb, pre_label, pre_label_len);
- buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
+ buffer_strcat(wb, string2str(r->dn[c]));
buffer_fast_strcat(wb, post_label, post_label_len);
i++;
}
@@ -151,7 +149,6 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
);
// for each line in the array
- NETDATA_DOUBLE total = 1;
for(i = start; i != end ;i += step) {
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
@@ -203,7 +200,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
buffer_fast_strcat(wb, object_rows_time, object_rows_time_len);
- buffer_rrd_value(wb, (NETDATA_DOUBLE)r->t[i]);
+ buffer_print_netdata_double(wb, (NETDATA_DOUBLE) r->t[i]);
// in ms
if(unlikely(options & RRDR_OPTION_MILLISECONDS))
@@ -212,33 +209,10 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
buffer_fast_strcat(wb, post_date, post_date_len);
}
- int set_min_max = 0;
- if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
- total = 0;
- for(c = 0; c < used ;c++) {
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
-
- NETDATA_DOUBLE n;
- if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
- n = ar[c];
- else
- n = cn[c];
-
- if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
-
- total += n;
- }
- // prevent a division by zero
- if(total == 0) total = 1;
- set_min_max = 1;
- }
-
// for each dimension
for(c = 0; c < used ;c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
NETDATA_DOUBLE n;
if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
@@ -249,39 +223,119 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
buffer_fast_strcat(wb, pre_value, pre_value_len);
if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
- buffer_sprintf(wb, "%s%s%s: ", kq, string2str(qt->query.array[c].dimension.name), kq);
+ buffer_sprintf(wb, "%s%s%s: ", kq, string2str(r->dn[c]), kq);
- if(co[c] & RRDR_VALUE_EMPTY && !(options & RRDR_OPTION_INTERNAL_AR)) {
+ if(co[c] & RRDR_VALUE_EMPTY && !(options & (RRDR_OPTION_INTERNAL_AR))) {
if(unlikely(options & RRDR_OPTION_NULL2ZERO))
buffer_fast_strcat(wb, "0", 1);
else
buffer_fast_strcat(wb, "null", 4);
}
- else {
- if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
+ else
+ buffer_print_netdata_double(wb, n);
- if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
- n = n * 100 / total;
+ buffer_fast_strcat(wb, post_value, post_value_len);
+ }
- if(unlikely(set_min_max)) {
- r->min = r->max = n;
- set_min_max = 0;
- }
+ buffer_fast_strcat(wb, post_line, post_line_len);
+ }
+
+ buffer_strcat(wb, finish);
+ //info("RRD2JSON(): %s: END", r->st->id);
+}
+
+
+void rrdr2json_v2(RRDR *r, BUFFER *wb) {
+ QUERY_TARGET *qt = r->internal.qt;
+ RRDR_OPTIONS options = qt->window.options;
+
+ bool expose_gbc = query_target_aggregatable(qt);
+
+ buffer_json_member_add_object(wb, "result");
- if(n < r->min) r->min = n;
- if(n > r->max) r->max = n;
+ buffer_json_member_add_array(wb, "labels");
+ buffer_json_add_array_item_string(wb, "time");
+ long d, i;
+ const long used = (long)r->d;
+ for(d = 0, i = 0; d < used ; d++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[d], options))
+ continue;
+
+ buffer_json_add_array_item_string(wb, string2str(r->di[d]));
+ i++;
+ }
+ buffer_json_array_close(wb); // labels
+
+ buffer_json_member_add_object(wb, "point");
+ buffer_json_member_add_uint64(wb, "value", 0);
+ buffer_json_member_add_uint64(wb, "arp", 1);
+ buffer_json_member_add_uint64(wb, "pa", 2);
+ if(expose_gbc)
+ buffer_json_member_add_uint64(wb, "count", 3);
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_array(wb, "data");
+ if(i) {
+ long start = 0, end = rrdr_rows(r), step = 1;
+ if (!(options & RRDR_OPTION_REVERSED)) {
+ start = rrdr_rows(r) - 1;
+ end = -1;
+ step = -1;
+ }
+
+ // for each line in the array
+ for (i = start; i != end; i += step) {
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+ NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
+ uint32_t *gbc = &r->gbc [ i * r->d ];
+ time_t now = r->t[i];
+
+ buffer_json_add_array_item_array(wb); // row
+
+ if (options & RRDR_OPTION_MILLISECONDS)
+ buffer_json_add_array_item_time_ms(wb, now); // the time
+ else
+ buffer_json_add_array_item_time_t(wb, now); // the time
+
+ for (d = 0; d < used; d++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[d], options))
+ continue;
+
+ RRDR_VALUE_FLAGS o = co[d];
+
+ buffer_json_add_array_item_array(wb); // point
+
+ // add the value
+ NETDATA_DOUBLE n = cn[d];
+
+ if(o & RRDR_VALUE_EMPTY) {
+ if (unlikely(options & RRDR_OPTION_NULL2ZERO))
+ buffer_json_add_array_item_double(wb, 0);
+ else
+ buffer_json_add_array_item_double(wb, NAN);
}
+ else
+ buffer_json_add_array_item_double(wb, n);
+
+ // add the anomaly
+ buffer_json_add_array_item_double(wb, ar[d]);
- buffer_rrd_value(wb, n);
+ // add the point annotations
+ buffer_json_add_array_item_uint64(wb, o);
+
+ // add the count
+ if(expose_gbc)
+ buffer_json_add_array_item_uint64(wb, gbc[d]);
+
+ buffer_json_array_close(wb); // point
}
- buffer_fast_strcat(wb, post_value, post_value_len);
+ buffer_json_array_close(wb); // row
}
-
- buffer_fast_strcat(wb, post_line, post_line_len);
}
- buffer_strcat(wb, finish);
- //info("RRD2JSON(): %s: END", r->st->id);
+ buffer_json_array_close(wb); // data
+
+    buffer_json_object_close(wb); // result
}
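
(Layout of the rows `rrdr2json_v2()` emits, as an annotated illustration with invented values; the trailing per-point count appears only when `query_target_aggregatable()` is true:)

```c
/*
 * "point": { "value": 0, "arp": 1, "pa": 2, "count": 3 }   // index legend
 *
 * one "data" row:
 *   [ 1683555600,             // time_t (ms when RRDR_OPTION_MILLISECONDS is set)
 *     [ 12.5,  0.0, 0, 4 ],   // dim A: [value, anomaly rate, point annotations, count]
 *     [  3.1, 25.0, 0, 4 ]    // dim B
 *   ]
 */
```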
diff --git a/web/api/formatters/json/json.h b/web/api/formatters/json/json.h
index fb59e5c9a..d1ab4f901 100644
--- a/web/api/formatters/json/json.h
+++ b/web/api/formatters/json/json.h
@@ -6,5 +6,6 @@
#include "../rrd2json.h"
void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable);
+void rrdr2json_v2(RRDR *r, BUFFER *wb);
#endif //NETDATA_API_FORMATTER_JSON_H
diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c
index aa663495a..6bcbb8d5a 100644
--- a/web/api/formatters/json_wrapper.c
+++ b/web/api/formatters/json_wrapper.c
@@ -2,441 +2,868 @@
#include "json_wrapper.h"
-struct value_output {
- int c;
- BUFFER *wb;
-};
+static void jsonwrap_query_metric_plan(BUFFER *wb, QUERY_METRIC *qm) {
+ buffer_json_member_add_array(wb, "plans");
+ for (size_t p = 0; p < qm->plan.used; p++) {
+ QUERY_PLAN_ENTRY *qp = &qm->plan.array[p];
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_uint64(wb, "tr", qp->tier);
+ buffer_json_member_add_time_t(wb, "af", qp->after);
+ buffer_json_member_add_time_t(wb, "bf", qp->before);
+ buffer_json_object_close(wb);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "tiers");
+ for (size_t tier = 0; tier < storage_tiers; tier++) {
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_uint64(wb, "tr", tier);
+ buffer_json_member_add_time_t(wb, "fe", qm->tiers[tier].db_first_time_s);
+ buffer_json_member_add_time_t(wb, "le", qm->tiers[tier].db_last_time_s);
+ buffer_json_member_add_int64(wb, "wg", qm->tiers[tier].weight);
+ buffer_json_object_close(wb);
+ }
+ buffer_json_array_close(wb);
+}
-static int value_list_output_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) {
- struct value_output *ap = (struct value_output *)data;
- BUFFER *wb = ap->wb;
- char *output = (char *) entry;
- if(ap->c) buffer_strcat(wb, ",");
- buffer_strcat(wb, output);
- (ap->c)++;
- return 0;
+void jsonwrap_query_plan(RRDR *r, BUFFER *wb) {
+ QUERY_TARGET *qt = r->internal.qt;
+
+ buffer_json_member_add_object(wb, "query_plan");
+ for(size_t m = 0; m < qt->query.used; m++) {
+ QUERY_METRIC *qm = query_metric(qt, m);
+ buffer_json_member_add_object(wb, query_metric_id(qt, qm));
+ jsonwrap_query_metric_plan(wb, qm);
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb);
}
-static int fill_formatted_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) {
- (void)ls;
- DICTIONARY *dict = (DICTIONARY *)data;
- char n[RRD_ID_LENGTH_MAX * 2 + 2];
- char output[RRD_ID_LENGTH_MAX * 2 + 8];
- char v[RRD_ID_LENGTH_MAX * 2 + 1];
+static inline size_t rrdr_dimension_names(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ const size_t dimensions = r->d;
+ size_t c, i;
- sanitize_json_string(v, (char *)value, RRD_ID_LENGTH_MAX * 2);
- int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\", \"%s\"]", name, v);
- snprintfz(n, RRD_ID_LENGTH_MAX * 2, "%s:%s", name, v);
- dictionary_set(dict, n, output, len + 1);
+ buffer_json_member_add_array(wb, key);
+ for(c = 0, i = 0; c < dimensions ; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- return 1;
+ buffer_json_add_array_item_string(wb, string2str(r->dn[c]));
+ i++;
+ }
+ buffer_json_array_close(wb);
+
+ return i;
}
-void rrdr_show_plan(RRDR *r, BUFFER *wb, const char *kq, const char *sq __maybe_unused) {
+static inline size_t rrdr_dimension_ids(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ const size_t dimensions = r->d;
+ size_t c, i;
+
+ buffer_json_member_add_array(wb, key);
+ for(c = 0, i = 0; c < dimensions ; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ buffer_json_add_array_item_string(wb, string2str(r->di[c]));
+ i++;
+ }
+ buffer_json_array_close(wb);
+
+ return i;
+}
+
+static inline long jsonwrap_v1_chart_ids(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
QUERY_TARGET *qt = r->internal.qt;
+ const long query_used = qt->query.used;
+ long c, i;
- buffer_sprintf(wb, "\n\t%squery_plan%s: {", kq, kq);
+ buffer_json_member_add_array(wb, key);
+ for (c = 0, i = 0; c < query_used; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- for(size_t m = 0; m < qt->query.used; m++) {
- QUERY_METRIC *qm = &qt->query.array[m];
+ QUERY_METRIC *qm = query_metric(qt, c);
+ QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
+ buffer_json_add_array_item_string(wb, rrdinstance_acquired_id(qi->ria));
+ i++;
+ }
+ buffer_json_array_close(wb);
- if(m)
- buffer_strcat(wb, ",");
+ return i;
+}
- buffer_sprintf(wb, "\n\t\t%s%s%s: {", kq, string2str(qm->dimension.id), kq);
+struct summary_total_counts {
+ size_t selected;
+ size_t excluded;
+ size_t queried;
+ size_t failed;
+};
- buffer_sprintf(wb, "\n\t\t\t%splans%s: [", kq, kq);
- for(size_t p = 0; p < qm->plan.used ;p++) {
- QUERY_PLAN_ENTRY *qp = &qm->plan.array[p];
- if(p)
- buffer_strcat(wb, ",");
+static inline void aggregate_into_summary_totals(struct summary_total_counts *totals, QUERY_METRICS_COUNTS *metrics) {
+ if(unlikely(!totals || !metrics))
+ return;
- buffer_strcat(wb, "\n\t\t\t\t{");
- buffer_sprintf(wb, "\n\t\t\t\t\t%stier%s: %zu,", kq, kq, qp->tier);
- buffer_sprintf(wb, "\n\t\t\t\t\t%safter%s: %ld,", kq, kq, qp->after);
- buffer_sprintf(wb, "\n\t\t\t\t\t%sbefore%s: %ld", kq, kq, qp->before);
- buffer_strcat(wb, "\n\t\t\t\t}");
- }
- buffer_strcat(wb, "\n\t\t\t],");
-
- buffer_sprintf(wb, "\n\t\t\t%stiers%s: [", kq, kq);
- for(size_t tier = 0; tier < storage_tiers ;tier++) {
- if(tier)
- buffer_strcat(wb, ",");
-
- buffer_strcat(wb, "\n\t\t\t\t{");
- buffer_sprintf(wb, "\n\t\t\t\t\t%stier%s: %zu,", kq, kq, tier);
- buffer_sprintf(wb, "\n\t\t\t\t\t%sdb_first_time%s: %ld,", kq, kq, qm->tiers[tier].db_first_time_s);
- buffer_sprintf(wb, "\n\t\t\t\t\t%sdb_last_time%s: %ld,", kq, kq, qm->tiers[tier].db_last_time_s);
- buffer_sprintf(wb, "\n\t\t\t\t\t%sweight%s: %ld", kq, kq, qm->tiers[tier].weight);
- buffer_strcat(wb, "\n\t\t\t\t}");
- }
- buffer_strcat(wb, "\n\t\t\t]");
+ if(metrics->selected) {
+ totals->selected++;
+
+ if(metrics->queried)
+ totals->queried++;
- buffer_strcat(wb, "\n\t\t}");
+ else if(metrics->failed)
+ totals->failed++;
}
+ else
+ totals->excluded++;
+}
+
+static inline void query_target_total_counts(BUFFER *wb, const char *key, struct summary_total_counts *totals) {
+ if(!totals->selected && !totals->queried && !totals->failed && !totals->excluded)
+ return;
- buffer_strcat(wb, "\n\t},");
+ buffer_json_member_add_object(wb, key);
+
+ if(totals->selected)
+ buffer_json_member_add_uint64(wb, "sl", totals->selected);
+
+ if(totals->excluded)
+ buffer_json_member_add_uint64(wb, "ex", totals->excluded);
+
+ if(totals->queried)
+ buffer_json_member_add_uint64(wb, "qr", totals->queried);
+
+ if(totals->failed)
+ buffer_json_member_add_uint64(wb, "fl", totals->failed);
+
+ buffer_json_object_close(wb);
}
-void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
- RRDR_GROUPING group_method)
-{
- QUERY_TARGET *qt = r->internal.qt;
+static inline void query_target_metric_counts(BUFFER *wb, QUERY_METRICS_COUNTS *metrics) {
+ if(!metrics->selected && !metrics->queried && !metrics->failed && !metrics->excluded)
+ return;
- long rows = rrdr_rows(r);
- long c, i;
- const long query_used = qt->query.used;
+ buffer_json_member_add_object(wb, "ds");
- //info("JSONWRAPPER(): %s: BEGIN", r->st->id);
- char kq[2] = "", // key quote
- sq[2] = ""; // string quote
+ if(metrics->selected)
+ buffer_json_member_add_uint64(wb, "sl", metrics->selected);
- if( options & RRDR_OPTION_GOOGLE_JSON ) {
- kq[0] = '\0';
- sq[0] = '\'';
+ if(metrics->excluded)
+ buffer_json_member_add_uint64(wb, "ex", metrics->excluded);
+
+ if(metrics->queried)
+ buffer_json_member_add_uint64(wb, "qr", metrics->queried);
+
+ if(metrics->failed)
+ buffer_json_member_add_uint64(wb, "fl", metrics->failed);
+
+ buffer_json_object_close(wb);
+}
+
+static inline void query_target_instance_counts(BUFFER *wb, QUERY_INSTANCES_COUNTS *instances) {
+ if(!instances->selected && !instances->queried && !instances->failed && !instances->excluded)
+ return;
+
+ buffer_json_member_add_object(wb, "is");
+
+ if(instances->selected)
+ buffer_json_member_add_uint64(wb, "sl", instances->selected);
+
+ if(instances->excluded)
+ buffer_json_member_add_uint64(wb, "ex", instances->excluded);
+
+ if(instances->queried)
+ buffer_json_member_add_uint64(wb, "qr", instances->queried);
+
+ if(instances->failed)
+ buffer_json_member_add_uint64(wb, "fl", instances->failed);
+
+ buffer_json_object_close(wb);
+}
+
+static inline void query_target_alerts_counts(BUFFER *wb, QUERY_ALERTS_COUNTS *alerts, const char *name, bool array) {
+ if(!alerts->clear && !alerts->other && !alerts->critical && !alerts->warning)
+ return;
+
+ if(array)
+ buffer_json_add_array_item_object(wb);
+ else
+ buffer_json_member_add_object(wb, "al");
+
+ if(name)
+ buffer_json_member_add_string(wb, "nm", name);
+
+ if(alerts->clear)
+ buffer_json_member_add_uint64(wb, "cl", alerts->clear);
+
+ if(alerts->warning)
+ buffer_json_member_add_uint64(wb, "wr", alerts->warning);
+
+ if(alerts->critical)
+ buffer_json_member_add_uint64(wb, "cr", alerts->critical);
+
+ if(alerts->other)
+ buffer_json_member_add_uint64(wb, "ot", alerts->other);
+
+ buffer_json_object_close(wb);
+}
+
+static inline void query_target_points_statistics(BUFFER *wb, QUERY_TARGET *qt, STORAGE_POINT *sp) {
+ if(!sp->count)
+ return;
+
+ buffer_json_member_add_object(wb, "sts");
+
+ buffer_json_member_add_double(wb, "min", sp->min);
+ buffer_json_member_add_double(wb, "max", sp->max);
+
+ if(query_target_aggregatable(qt)) {
+ buffer_json_member_add_uint64(wb, "cnt", sp->count);
+
+ if(sp->sum != 0.0) {
+ buffer_json_member_add_double(wb, "sum", sp->sum);
+ buffer_json_member_add_double(wb, "vol", sp->sum * (NETDATA_DOUBLE) query_view_update_every(qt));
+ }
+
+ if(sp->anomaly_count != 0)
+ buffer_json_member_add_uint64(wb, "arc", sp->anomaly_count);
}
else {
- kq[0] = '"';
- sq[0] = '"';
+ NETDATA_DOUBLE avg = (sp->count) ? sp->sum / (NETDATA_DOUBLE)sp->count : 0.0;
+ if(avg != 0.0)
+ buffer_json_member_add_double(wb, "avg", avg);
+
+ NETDATA_DOUBLE arp = storage_point_anomaly_rate(*sp);
+ if(arp != 0.0)
+ buffer_json_member_add_double(wb, "arp", arp);
+
+ NETDATA_DOUBLE con = (qt->query_points.sum > 0.0) ? sp->sum * 100.0 / qt->query_points.sum : 0.0;
+ if(con != 0.0)
+ buffer_json_member_add_double(wb, "con", con);
}
+ buffer_json_object_close(wb);
+}
- buffer_sprintf(wb, "{\n"
- " %sapi%s: 1,\n"
- " %sid%s: %s%s%s,\n"
- " %sname%s: %s%s%s,\n"
- " %sview_update_every%s: %lld,\n"
- " %supdate_every%s: %lld,\n"
- " %sfirst_entry%s: %lld,\n"
- " %slast_entry%s: %lld,\n"
- " %sbefore%s: %lld,\n"
- " %safter%s: %lld,\n"
- " %sgroup%s: %s%s%s,\n"
- " %soptions%s: %s"
- , kq, kq
- , kq, kq, sq, qt->id, sq
- , kq, kq, sq, qt->id, sq
- , kq, kq, (long long)r->update_every
- , kq, kq, (long long)qt->db.minimum_latest_update_every_s
- , kq, kq, (long long)qt->db.first_time_s
- , kq, kq, (long long)qt->db.last_time_s
- , kq, kq, (long long)r->before
- , kq, kq, (long long)r->after
- , kq, kq, sq, web_client_api_request_v1_data_group_to_string(group_method), sq
- , kq, kq, sq);
-
- web_client_api_request_v1_data_options_to_buffer(wb, r->internal.query_options);
-
- buffer_sprintf(wb, "%s,\n %sdimension_names%s: [", sq, kq, kq);
-
- for(c = 0, i = 0; c < query_used ; c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
-
- if(i) buffer_strcat(wb, ", ");
- buffer_strcat(wb, sq);
- buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
- buffer_strcat(wb, sq);
- i++;
+static void query_target_summary_nodes_v2(BUFFER *wb, QUERY_TARGET *qt, const char *key, struct summary_total_counts *totals) {
+ buffer_json_member_add_array(wb, key);
+ for (size_t c = 0; c < qt->nodes.used; c++) {
+ QUERY_NODE *qn = query_node(qt, c);
+ RRDHOST *host = qn->rrdhost;
+ buffer_json_add_array_item_object(wb);
+ buffer_json_node_add_v2(wb, host, qn->slot, qn->duration_ut);
+ query_target_instance_counts(wb, &qn->instances);
+ query_target_metric_counts(wb, &qn->metrics);
+ query_target_alerts_counts(wb, &qn->alerts, NULL, false);
+ query_target_points_statistics(wb, qt, &qn->query_points);
+ buffer_json_object_close(wb);
+
+ aggregate_into_summary_totals(totals, &qn->metrics);
}
- if(!i) {
-#ifdef NETDATA_INTERNAL_CHECKS
- error("QUERY: '%s', RRDR is empty, %zu dimensions, options is 0x%08x", qt->id, r->d, options);
-#endif
- rows = 0;
- buffer_strcat(wb, sq);
- buffer_strcat(wb, "no data");
- buffer_strcat(wb, sq);
+ buffer_json_array_close(wb);
+}
+
+static size_t query_target_summary_contexts_v2(BUFFER *wb, QUERY_TARGET *qt, const char *key, struct summary_total_counts *totals) {
+ buffer_json_member_add_array(wb, key);
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+
+ struct {
+ STORAGE_POINT query_points;
+ QUERY_INSTANCES_COUNTS instances;
+ QUERY_METRICS_COUNTS metrics;
+ QUERY_ALERTS_COUNTS alerts;
+ } *z;
+
+ for (long c = 0; c < (long) qt->contexts.used; c++) {
+ QUERY_CONTEXT *qc = query_context(qt, c);
+
+ z = dictionary_set(dict, rrdcontext_acquired_id(qc->rca), NULL, sizeof(*z));
+
+ z->instances.selected += qc->instances.selected;
+        z->instances.excluded += qc->instances.excluded;
+ z->instances.queried += qc->instances.queried;
+ z->instances.failed += qc->instances.failed;
+
+ z->metrics.selected += qc->metrics.selected;
+ z->metrics.excluded += qc->metrics.excluded;
+ z->metrics.queried += qc->metrics.queried;
+ z->metrics.failed += qc->metrics.failed;
+
+        z->alerts.clear += qc->alerts.clear;
+        z->alerts.warning += qc->alerts.warning;
+        z->alerts.critical += qc->alerts.critical;
+        z->alerts.other += qc->alerts.other;
+
+ storage_point_merge_to(z->query_points, qc->query_points);
}
- buffer_sprintf(wb, "],\n"
- " %sdimension_ids%s: ["
- , kq, kq);
+ size_t unique_contexts = dictionary_entries(dict);
+ dfe_start_read(dict, z) {
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", z_dfe.name);
+ query_target_instance_counts(wb, &z->instances);
+ query_target_metric_counts(wb, &z->metrics);
+ query_target_alerts_counts(wb, &z->alerts, NULL, false);
+ query_target_points_statistics(wb, qt, &z->query_points);
+ buffer_json_object_close(wb);
+
+ aggregate_into_summary_totals(totals, &z->metrics);
+ }
+ dfe_done(z);
+ buffer_json_array_close(wb);
+ dictionary_destroy(dict);
- for(c = 0, i = 0; c < query_used ; c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ return unique_contexts;
+}
- if(i) buffer_strcat(wb, ", ");
- buffer_strcat(wb, sq);
- buffer_strcat(wb, string2str(qt->query.array[c].dimension.id));
- buffer_strcat(wb, sq);
- i++;
+static void query_target_summary_instances_v1(BUFFER *wb, QUERY_TARGET *qt, const char *key) {
+ char name[RRD_ID_LENGTH_MAX * 2 + 2];
+
+ buffer_json_member_add_array(wb, key);
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (long c = 0; c < (long) qt->instances.used; c++) {
+ QUERY_INSTANCE *qi = query_instance(qt, c);
+
+ snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
+ rrdinstance_acquired_id(qi->ria),
+ rrdinstance_acquired_name(qi->ria));
+
+ bool *set = dictionary_set(dict, name, NULL, sizeof(*set));
+ if (!*set) {
+ *set = true;
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, rrdinstance_acquired_id(qi->ria));
+ buffer_json_add_array_item_string(wb, rrdinstance_acquired_name(qi->ria));
+ buffer_json_array_close(wb);
+ }
}
- if(!i) {
- rows = 0;
- buffer_strcat(wb, sq);
- buffer_strcat(wb, "no data");
- buffer_strcat(wb, sq);
+ dictionary_destroy(dict);
+ buffer_json_array_close(wb);
+}
+
+static void query_target_summary_instances_v2(BUFFER *wb, QUERY_TARGET *qt, const char *key, struct summary_total_counts *totals) {
+ buffer_json_member_add_array(wb, key);
+ for (long c = 0; c < (long) qt->instances.used; c++) {
+ QUERY_INSTANCE *qi = query_instance(qt, c);
+// QUERY_HOST *qh = query_host(qt, qi->query_host_id);
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", rrdinstance_acquired_id(qi->ria));
+
+ if(!rrdinstance_acquired_id_and_name_are_same(qi->ria))
+ buffer_json_member_add_string(wb, "nm", rrdinstance_acquired_name(qi->ria));
+
+ buffer_json_member_add_uint64(wb, "ni", qi->query_host_id);
+// buffer_json_member_add_string(wb, "id", string2str(qi->id_fqdn));
+// buffer_json_member_add_string(wb, "nm", string2str(qi->name_fqdn));
+// buffer_json_member_add_string(wb, "lc", rrdinstance_acquired_name(qi->ria));
+// buffer_json_member_add_string(wb, "mg", qh->host->machine_guid);
+// if(qh->node_id[0])
+// buffer_json_member_add_string(wb, "nd", qh->node_id);
+ query_target_metric_counts(wb, &qi->metrics);
+ query_target_alerts_counts(wb, &qi->alerts, NULL, false);
+ query_target_points_statistics(wb, qt, &qi->query_points);
+ buffer_json_object_close(wb);
+
+ aggregate_into_summary_totals(totals, &qi->metrics);
}
- buffer_strcat(wb, "],\n");
+ buffer_json_array_close(wb);
+}
- if (r->internal.query_options & RRDR_OPTION_ALL_DIMENSIONS) {
- buffer_sprintf(wb, " %sfull_dimension_list%s: [", kq, kq);
+struct dimensions_sorted_walkthrough_data {
+ BUFFER *wb;
+ struct summary_total_counts *totals;
+ QUERY_TARGET *qt;
+};
- char name[RRD_ID_LENGTH_MAX * 2 + 2];
- char output[RRD_ID_LENGTH_MAX * 2 + 8];
+struct dimensions_sorted_entry {
+ const char *id;
+ const char *name;
+ STORAGE_POINT query_points;
+ QUERY_METRICS_COUNTS metrics;
+ uint32_t priority;
+};
- struct value_output co = {.c = 0, .wb = wb};
+static int dimensions_sorted_walktrhough_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ struct dimensions_sorted_walkthrough_data *sdwd = data;
+ BUFFER *wb = sdwd->wb;
+ struct summary_total_counts *totals = sdwd->totals;
+ QUERY_TARGET *qt = sdwd->qt;
+ struct dimensions_sorted_entry *z = value;
- DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
- for (c = 0; c < (long)qt->metrics.used ;c++) {
- snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
- rrdmetric_acquired_id(qt->metrics.array[c]),
- rrdmetric_acquired_name(qt->metrics.array[c]));
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", z->id);
+ if (z->id != z->name && z->name)
+ buffer_json_member_add_string(wb, "nm", z->name);
- int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]",
- rrdmetric_acquired_id(qt->metrics.array[c]),
- rrdmetric_acquired_name(qt->metrics.array[c]));
+ query_target_metric_counts(wb, &z->metrics);
+ query_target_points_statistics(wb, qt, &z->query_points);
+ buffer_json_member_add_uint64(wb, "pri", z->priority);
+ buffer_json_object_close(wb);
- dictionary_set(dict, name, output, len + 1);
- }
- dictionary_walkthrough_read(dict, value_list_output_callback, &co);
- dictionary_destroy(dict);
+ aggregate_into_summary_totals(totals, &z->metrics);
- co.c = 0;
- buffer_sprintf(wb, "],\n %sfull_chart_list%s: [", kq, kq);
- dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
- for (c = 0; c < (long)qt->instances.used ; c++) {
- RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c];
+ return 1;
+}
- snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
- rrdinstance_acquired_id(ria),
- rrdinstance_acquired_name(ria));
+int dimensions_sorted_compar(const DICTIONARY_ITEM **item1, const DICTIONARY_ITEM **item2) {
+ struct dimensions_sorted_entry *z1 = dictionary_acquired_item_value(*item1);
+ struct dimensions_sorted_entry *z2 = dictionary_acquired_item_value(*item2);
- int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]",
- rrdinstance_acquired_id(ria),
- rrdinstance_acquired_name(ria));
+ if(z1->priority == z2->priority)
+ return strcmp(dictionary_acquired_item_name(*item1), dictionary_acquired_item_name(*item2));
+ else if(z1->priority < z2->priority)
+ return -1;
+ else
+ return 1;
+}
- dictionary_set(dict, name, output, len + 1);
+static void query_target_summary_dimensions_v12(BUFFER *wb, QUERY_TARGET *qt, const char *key, bool v2, struct summary_total_counts *totals) {
+ char buf[RRD_ID_LENGTH_MAX * 2 + 2];
+
+ buffer_json_member_add_array(wb, key);
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+ struct dimensions_sorted_entry *z;
+ size_t q = 0;
+ for (long c = 0; c < (long) qt->dimensions.used; c++) {
+ QUERY_DIMENSION * qd = query_dimension(qt, c);
+ RRDMETRIC_ACQUIRED *rma = qd->rma;
+
+ QUERY_METRIC *qm = NULL;
+ for( ; q < qt->query.used ;q++) {
+ QUERY_METRIC *tqm = query_metric(qt, q);
+ QUERY_DIMENSION *tqd = query_dimension(qt, tqm->link.query_dimension_id);
+ if(tqd->rma != rma) break;
+ qm = tqm;
}
- dictionary_walkthrough_read(dict, value_list_output_callback, &co);
- dictionary_destroy(dict);
- co.c = 0;
- buffer_sprintf(wb, "],\n %sfull_chart_labels%s: [", kq, kq);
- dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
- for (c = 0; c < (long)qt->instances.used ; c++) {
- RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c];
- rrdlabels_walkthrough_read(rrdinstance_acquired_labels(ria), fill_formatted_callback, dict);
+ const char *key, *id, *name;
+
+ if(v2) {
+ key = rrdmetric_acquired_name(rma);
+ id = key;
+ name = key;
+ }
+ else {
+ snprintfz(buf, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
+ rrdmetric_acquired_id(rma),
+ rrdmetric_acquired_name(rma));
+ key = buf;
+ id = rrdmetric_acquired_id(rma);
+ name = rrdmetric_acquired_name(rma);
}
- dictionary_walkthrough_read(dict, value_list_output_callback, &co);
- dictionary_destroy(dict);
- buffer_strcat(wb, "],\n");
- }
- // functions
- {
- DICTIONARY *funcs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
- RRDINSTANCE_ACQUIRED *ria = NULL;
- for (c = 0; c < query_used ; c++) {
- QUERY_METRIC *qm = &qt->query.array[c];
- if(qm->link.ria == ria)
- continue;
+ z = dictionary_set(dict, key, NULL, sizeof(*z));
+ if(!z->id) {
+ z->id = id;
+ z->name = name;
+ z->priority = qd->priority;
+ }
+ else {
+ if(qd->priority < z->priority)
+ z->priority = qd->priority;
+ }
- ria = qm->link.ria;
- chart_functions_to_dict(rrdinstance_acquired_functions(ria), funcs);
+ if(qm) {
+ z->metrics.selected += (qm->status & RRDR_DIMENSION_SELECTED) ? 1 : 0;
+ z->metrics.failed += (qm->status & RRDR_DIMENSION_FAILED) ? 1 : 0;
+
+ if(qm->status & RRDR_DIMENSION_QUERIED) {
+ z->metrics.queried++;
+ storage_point_merge_to(z->query_points, qm->query_points);
+ }
}
+ else
+ z->metrics.excluded++;
+ }
- buffer_sprintf(wb, " %sfunctions%s: [", kq, kq);
- void *t; (void)t;
- dfe_start_read(funcs, t) {
- const char *comma = "";
- if(t_dfe.counter) comma = ", ";
- buffer_sprintf(wb, "%s%s%s%s", comma, sq, t_dfe.name, sq);
+ if(v2) {
+ struct dimensions_sorted_walkthrough_data t = {
+ .wb = wb,
+ .totals = totals,
+ .qt = qt,
+ };
+ dictionary_sorted_walkthrough_rw(dict, DICTIONARY_LOCK_READ, dimensions_sorted_walktrhough_cb,
+ &t, dimensions_sorted_compar);
+ }
+ else {
+ // v1
+ dfe_start_read(dict, z) {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, z->id);
+ buffer_json_add_array_item_string(wb, z->name);
+ buffer_json_array_close(wb);
}
- dfe_done(t);
- dictionary_destroy(funcs);
- buffer_strcat(wb, "],\n");
+ dfe_done(z);
}
+ dictionary_destroy(dict);
+ buffer_json_array_close(wb);
+}
- // context query
- if (!qt->request.st) {
- buffer_sprintf(
- wb,
- " %schart_ids%s: [",
- kq, kq);
+struct rrdlabels_formatting_v2 {
+ DICTIONARY *keys;
+ QUERY_INSTANCE *qi;
+ bool v2;
+};
- for (c = 0, i = 0; c < query_used; c++) {
- QUERY_METRIC *qm = &qt->query.array[c];
+struct rrdlabels_keys_dict_entry {
+ const char *name;
+ DICTIONARY *values;
+ STORAGE_POINT query_points;
+ QUERY_METRICS_COUNTS metrics;
+};
- if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if (unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
- continue;
+struct rrdlabels_key_value_dict_entry {
+ const char *key;
+ const char *value;
+ STORAGE_POINT query_points;
+ QUERY_METRICS_COUNTS metrics;
+};
- if (i)
- buffer_strcat(wb, ", ");
- buffer_strcat(wb, sq);
- buffer_strcat(wb, string2str(qm->chart.id));
- buffer_strcat(wb, sq);
- i++;
- }
- if (!i) {
- rows = 0;
- buffer_strcat(wb, sq);
- buffer_strcat(wb, "no data");
- buffer_strcat(wb, sq);
- }
- buffer_strcat(wb, "],\n");
- if (qt->instances.chart_label_key_pattern) {
- buffer_sprintf(wb, " %schart_labels%s: { ", kq, kq);
-
- SIMPLE_PATTERN *pattern = qt->instances.chart_label_key_pattern;
- char *label_key = NULL;
- int keys = 0;
- while (pattern && (label_key = simple_pattern_iterate(&pattern))) {
- if (keys)
- buffer_strcat(wb, ", ");
- buffer_sprintf(wb, "%s%s%s : [", kq, label_key, kq);
- keys++;
-
- for (c = 0, i = 0; c < query_used; c++) {
- QUERY_METRIC *qm = &qt->query.array[c];
-
- if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if (unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
- continue;
+static int rrdlabels_formatting_v2(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) {
+ struct rrdlabels_formatting_v2 *t = data;
- if (i)
- buffer_strcat(wb, ", ");
- rrdlabels_get_value_to_buffer_or_null(rrdinstance_acquired_labels(qm->link.ria), wb, label_key, sq, "null");
- i++;
+ struct rrdlabels_keys_dict_entry *d = dictionary_set(t->keys, name, NULL, sizeof(*d));
+ if(!d->values) {
+ d->name = name;
+ d->values = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+ }
+
+ char n[RRD_ID_LENGTH_MAX * 2 + 2];
+ snprintfz(n, RRD_ID_LENGTH_MAX * 2, "%s:%s", name, value);
+
+ struct rrdlabels_key_value_dict_entry *z = dictionary_set(d->values, n, NULL, sizeof(*z));
+ if(!z->key) {
+ z->key = name;
+ z->value = value;
+ }
+
+ if(t->v2) {
+ QUERY_INSTANCE *qi = t->qi;
+
+ z->metrics.selected += qi->metrics.selected;
+ z->metrics.excluded += qi->metrics.excluded;
+ z->metrics.queried += qi->metrics.queried;
+ z->metrics.failed += qi->metrics.failed;
+
+ d->metrics.selected += qi->metrics.selected;
+ d->metrics.excluded += qi->metrics.excluded;
+ d->metrics.queried += qi->metrics.queried;
+ d->metrics.failed += qi->metrics.failed;
+
+ storage_point_merge_to(z->query_points, qi->query_points);
+ storage_point_merge_to(d->query_points, qi->query_points);
+ }
+
+ return 1;
+}
+
+static void query_target_summary_labels_v12(BUFFER *wb, QUERY_TARGET *qt, const char *key, bool v2, struct summary_total_counts *key_totals, struct summary_total_counts *value_totals) {
+ buffer_json_member_add_array(wb, key);
+ struct rrdlabels_formatting_v2 t = {
+ .keys = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE),
+ .v2 = v2,
+ };
+ for (long c = 0; c < (long) qt->instances.used; c++) {
+ QUERY_INSTANCE *qi = query_instance(qt, c);
+ RRDINSTANCE_ACQUIRED *ria = qi->ria;
+ t.qi = qi;
+ rrdlabels_walkthrough_read(rrdinstance_acquired_labels(ria), rrdlabels_formatting_v2, &t);
+ }
+ struct rrdlabels_keys_dict_entry *d;
+ dfe_start_read(t.keys, d) {
+ if(v2) {
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", d_dfe.name);
+ query_target_metric_counts(wb, &d->metrics);
+ query_target_points_statistics(wb, qt, &d->query_points);
+ aggregate_into_summary_totals(key_totals, &d->metrics);
+ buffer_json_member_add_array(wb, "vl");
}
- if (!i) {
- rows = 0;
- buffer_strcat(wb, sq);
- buffer_strcat(wb, "no data");
- buffer_strcat(wb, sq);
+ struct rrdlabels_key_value_dict_entry *z;
+ dfe_start_read(d->values, z){
+ if (v2) {
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", z->value);
+ query_target_metric_counts(wb, &z->metrics);
+ query_target_points_statistics(wb, qt, &z->query_points);
+ buffer_json_object_close(wb);
+ aggregate_into_summary_totals(value_totals, &z->metrics);
+ } else {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_string(wb, z->key);
+ buffer_json_add_array_item_string(wb, z->value);
+ buffer_json_array_close(wb);
+ }
+ }
+ dfe_done(z);
+ dictionary_destroy(d->values);
+ if(v2) {
+ buffer_json_array_close(wb);
+ buffer_json_object_close(wb);
}
- buffer_strcat(wb, "]");
}
- buffer_strcat(wb, "},\n");
+ dfe_done(d);
+ dictionary_destroy(t.keys);
+ buffer_json_array_close(wb);
+}
+
+static void query_target_summary_alerts_v2(BUFFER *wb, QUERY_TARGET *qt, const char *key) {
+ buffer_json_member_add_array(wb, key);
+ QUERY_ALERTS_COUNTS *z;
+
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (long c = 0; c < (long) qt->instances.used; c++) {
+ QUERY_INSTANCE *qi = query_instance(qt, c);
+ RRDSET *st = rrdinstance_acquired_rrdset(qi->ria);
+ if (st) {
+ netdata_rwlock_rdlock(&st->alerts.rwlock);
+ if (st->alerts.base) {
+ for (RRDCALC *rc = st->alerts.base; rc; rc = rc->next) {
+ z = dictionary_set(dict, string2str(rc->name), NULL, sizeof(*z));
+
+ switch(rc->status) {
+ case RRDCALC_STATUS_CLEAR:
+ z->clear++;
+ break;
+
+ case RRDCALC_STATUS_WARNING:
+ z->warning++;
+ break;
+
+ case RRDCALC_STATUS_CRITICAL:
+ z->critical++;
+ break;
+
+ default:
+ case RRDCALC_STATUS_UNINITIALIZED:
+ case RRDCALC_STATUS_UNDEFINED:
+ case RRDCALC_STATUS_REMOVED:
+ z->other++;
+ break;
+ }
+ }
+ }
+ netdata_rwlock_unlock(&st->alerts.rwlock);
+ }
+ }
+ dfe_start_read(dict, z)
+ query_target_alerts_counts(wb, z, z_dfe.name, true);
+ dfe_done(z);
+ dictionary_destroy(dict);
+ buffer_json_array_close(wb); // alerts
+}
+
+static inline void query_target_functions(BUFFER *wb, const char *key, RRDR *r) {
+ QUERY_TARGET *qt = r->internal.qt;
+ const long query_used = qt->query.used;
+
+ DICTIONARY *funcs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ RRDINSTANCE_ACQUIRED *ria = NULL;
+ for (long c = 0; c < query_used ; c++) {
+ QUERY_METRIC *qm = query_metric(qt, c);
+ QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
+ if(qi->ria == ria)
+ continue;
+
+ ria = qi->ria;
+ chart_functions_to_dict(rrdinstance_acquired_functions(ria), funcs);
+ }
+
+ buffer_json_member_add_array(wb, key);
+ void *t; (void)t;
+ dfe_start_read(funcs, t)
+ buffer_json_add_array_item_string(wb, t_dfe.name);
+ dfe_done(t);
+ dictionary_destroy(funcs);
+ buffer_json_array_close(wb);
+}
+
+static inline long query_target_chart_labels_filter_v1(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ QUERY_TARGET *qt = r->internal.qt;
+ const long query_used = qt->query.used;
+ long c, i = 0;
+
+ buffer_json_member_add_object(wb, key);
+
+ SIMPLE_PATTERN *pattern = qt->instances.chart_label_key_pattern;
+ char *label_key = NULL;
+ while (pattern && (label_key = simple_pattern_iterate(&pattern))) {
+ buffer_json_member_add_array(wb, label_key);
+
+ for (c = 0, i = 0; c < query_used; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ QUERY_METRIC *qm = query_metric(qt, c);
+ QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
+ rrdlabels_value_to_buffer_array_item_or_null(rrdinstance_acquired_labels(qi->ria), wb, label_key);
+ i++;
}
+ buffer_json_array_close(wb);
}
- buffer_sprintf(wb, " %slatest_values%s: ["
- , kq, kq);
+ buffer_json_object_close(wb);
+
+ return i;
+}
+
+static inline long query_target_metrics_latest_values(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ QUERY_TARGET *qt = r->internal.qt;
+ const long query_used = qt->query.used;
+ long c, i;
+
+ buffer_json_member_add_array(wb, key);
for(c = 0, i = 0; c < query_used ;c++) {
- QUERY_METRIC *qm = &qt->query.array[c];
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ QUERY_METRIC *qm = query_metric(qt, c);
+ QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
+ buffer_json_add_array_item_double(wb, rrdmetric_acquired_last_stored_value(qd->rma));
+ i++;
+ }
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ buffer_json_array_close(wb);
+
+ return i;
+}
+
+static inline size_t rrdr_dimension_view_latest_values(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ buffer_json_member_add_array(wb, key);
+
+ size_t c, i;
+ for(c = 0, i = 0; c < r->d ; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- if(i) buffer_strcat(wb, ", ");
i++;
- NETDATA_DOUBLE value = rrdmetric_acquired_last_stored_value(qm->link.rma);
- if (NAN == value)
- buffer_strcat(wb, "null");
+ NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ (rrdr_rows(r) - 1) * r->d ];
+ NETDATA_DOUBLE n = cn[c];
+
+ if(co[c] & RRDR_VALUE_EMPTY) {
+ if(options & RRDR_OPTION_NULL2ZERO)
+ buffer_json_add_array_item_double(wb, 0.0);
+ else
+ buffer_json_add_array_item_double(wb, NAN);
+ }
else
- buffer_rrd_value(wb, value);
- }
- if(!i) {
- rows = 0;
- buffer_strcat(wb, "null");
+ buffer_json_add_array_item_double(wb, n);
}
- buffer_sprintf(wb, "],\n"
- " %sview_latest_values%s: ["
- , kq, kq);
+ buffer_json_array_close(wb);
- i = 0;
- if(rows) {
- NETDATA_DOUBLE total = 1;
+ return i;
+}
- if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
- total = 0;
- for(c = 0; c < query_used ;c++) {
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
+static inline void rrdr_dimension_query_points_statistics(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options, bool dview) {
+ STORAGE_POINT *sp = (dview) ? r->dview : r->dqp;
+ NETDATA_DOUBLE anomaly_rate_multiplier = (dview) ? RRDR_DVIEW_ANOMALY_COUNT_MULTIPLIER : 1.0;
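+    // dview statistics appear to carry the anomaly rate pre-scaled by
+    // RRDR_DVIEW_ANOMALY_COUNT_MULTIPLIER (see the divisions below), so the
+    // multiplier is taken back out before the rate is reported.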
- NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
- NETDATA_DOUBLE n = cn[c];
+ if(unlikely(!sp))
+ return;
- if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
+ if(key)
+ buffer_json_member_add_object(wb, key);
- total += n;
- }
- // prevent a division by zero
- if(total == 0) total = 1;
- }
+ buffer_json_member_add_array(wb, "min");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- for(c = 0, i = 0; c < query_used ;c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ buffer_json_add_array_item_double(wb, sp[c].min);
+ }
+ buffer_json_array_close(wb);
- if(i) buffer_strcat(wb, ", ");
- i++;
+ buffer_json_member_add_array(wb, "max");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
- RRDR_VALUE_FLAGS *co = &r->o[ (rrdr_rows(r) - 1) * r->d ];
- NETDATA_DOUBLE n = cn[c];
+ buffer_json_add_array_item_double(wb, sp[c].max);
+ }
+ buffer_json_array_close(wb);
- if(co[c] & RRDR_VALUE_EMPTY) {
- if(options & RRDR_OPTION_NULL2ZERO)
- buffer_strcat(wb, "0");
- else
- buffer_strcat(wb, "null");
- }
- else {
- if(unlikely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
+ if(options & RRDR_OPTION_RETURN_RAW) {
+ buffer_json_member_add_array(wb, "sum");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- if(unlikely(options & RRDR_OPTION_PERCENTAGE))
- n = n * 100 / total;
+ buffer_json_add_array_item_double(wb, sp[c].sum);
+ }
+ buffer_json_array_close(wb);
- buffer_rrd_value(wb, n);
- }
+ buffer_json_member_add_array(wb, "cnt");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ buffer_json_add_array_item_uint64(wb, sp[c].count);
}
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "arc");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ buffer_json_add_array_item_uint64(wb, storage_point_anomaly_rate(sp[c]) / anomaly_rate_multiplier / 100.0 * sp[c].count);
+ }
+ buffer_json_array_close(wb);
}
- if(!i) {
- rows = 0;
- buffer_strcat(wb, "null");
- }
+ else {
+ NETDATA_DOUBLE sum = 0.0;
+ for(size_t c = 0; c < r->d ; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- buffer_sprintf(wb, "],\n"
- " %sdimensions%s: %ld,\n"
- " %spoints%s: %ld,\n"
- " %sformat%s: %s"
- , kq, kq, i
- , kq, kq, rows
- , kq, kq, sq
- );
+ sum += ABS(sp[c].sum);
+ }
- rrdr_buffer_print_format(wb, format);
+ buffer_json_member_add_array(wb, "avg");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- buffer_sprintf(wb, "%s,\n"
- " %sdb_points_per_tier%s: [ "
- , sq
- , kq, kq
- );
+ buffer_json_add_array_item_double(wb, storage_point_average_value(sp[c]));
+ }
+ buffer_json_array_close(wb);
- for(size_t tier = 0; tier < storage_tiers ; tier++)
- buffer_sprintf(wb, "%s%zu", tier>0?", ":"", r->internal.tier_points_read[tier]);
+ buffer_json_member_add_array(wb, "arp");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- buffer_strcat(wb, " ],");
+ buffer_json_add_array_item_double(wb, storage_point_anomaly_rate(sp[c]) / anomaly_rate_multiplier);
+ }
+ buffer_json_array_close(wb);
- if(options & RRDR_OPTION_SHOW_PLAN)
- rrdr_show_plan(r, wb, kq, sq);
+ buffer_json_member_add_array(wb, "con");
+ for(size_t c = 0; c < r->d ; c++) {
+ if (!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
- buffer_sprintf(wb, "\n %sresult%s: ", kq, kq);
+ NETDATA_DOUBLE con = (sum > 0.0) ? ABS(sp[c].sum) * 100.0 / sum : 0.0;
+ buffer_json_add_array_item_double(wb, con);
+ }
+ buffer_json_array_close(wb);
+ }
- if(string_value) buffer_strcat(wb, sq);
- //info("JSONWRAPPER(): %s: END", r->st->id);
+ if(key)
+ buffer_json_object_close(wb);
}
-void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) {
- (void)r;
- (void)format;
+void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb) {
+ QUERY_TARGET *qt = r->internal.qt;
+ DATASOURCE_FORMAT format = qt->request.format;
+ RRDR_OPTIONS options = qt->window.options;
+
+ long rows = rrdr_rows(r);
char kq[2] = "", // key quote
- sq[2] = ""; // string quote
+ sq[2] = ""; // string quote
if( options & RRDR_OPTION_GOOGLE_JSON ) {
kq[0] = '\0';
@@ -447,31 +874,702 @@ void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint3
sq[0] = '"';
}
- if(string_value) buffer_strcat(wb, sq);
+ buffer_json_initialize(wb, kq, sq, 0, true, options & RRDR_OPTION_MINIFY);
+
+ buffer_json_member_add_uint64(wb, "api", 1);
+ buffer_json_member_add_string(wb, "id", qt->id);
+ buffer_json_member_add_string(wb, "name", qt->id);
+ buffer_json_member_add_time_t(wb, "view_update_every", r->view.update_every);
+ buffer_json_member_add_time_t(wb, "update_every", qt->db.minimum_latest_update_every_s);
+ buffer_json_member_add_time_t(wb, "first_entry", qt->db.first_time_s);
+ buffer_json_member_add_time_t(wb, "last_entry", qt->db.last_time_s);
+ buffer_json_member_add_time_t(wb, "after", r->view.after);
+ buffer_json_member_add_time_t(wb, "before", r->view.before);
+ buffer_json_member_add_string(wb, "group", time_grouping_tostring(qt->request.time_group_method));
+ web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+
+ if(!rrdr_dimension_names(wb, "dimension_names", r, options))
+ rows = 0;
+
+ if(!rrdr_dimension_ids(wb, "dimension_ids", r, options))
+ rows = 0;
+
+ if (options & RRDR_OPTION_ALL_DIMENSIONS) {
+ query_target_summary_instances_v1(wb, qt, "full_chart_list");
+ query_target_summary_dimensions_v12(wb, qt, "full_dimension_list", false, NULL);
+ query_target_summary_labels_v12(wb, qt, "full_chart_labels", false, NULL, NULL);
+ }
+
+ query_target_functions(wb, "functions", r);
+
+ if (!qt->request.st && !jsonwrap_v1_chart_ids(wb, "chart_ids", r, options))
+ rows = 0;
+
+ if (qt->instances.chart_label_key_pattern && !query_target_chart_labels_filter_v1(wb, "chart_labels", r, options))
+ rows = 0;
+
+ if(!query_target_metrics_latest_values(wb, "latest_values", r, options))
+ rows = 0;
+
+ size_t dimensions = rrdr_dimension_view_latest_values(wb, "view_latest_values", r, options);
+ if(!dimensions)
+ rows = 0;
+
+ buffer_json_member_add_uint64(wb, "dimensions", dimensions);
+ buffer_json_member_add_uint64(wb, "points", rows);
+ buffer_json_member_add_string(wb, "format", rrdr_format_to_string(format));
+
+ buffer_json_member_add_array(wb, "db_points_per_tier");
+ for(size_t tier = 0; tier < storage_tiers ; tier++)
+ buffer_json_add_array_item_uint64(wb, qt->db.tiers[tier].points);
+ buffer_json_array_close(wb);
+
+ if(options & RRDR_OPTION_DEBUG)
+ jsonwrap_query_plan(r, wb);
+}
+
+static void rrdset_rrdcalc_entries_v2(BUFFER *wb, RRDINSTANCE_ACQUIRED *ria) {
+ RRDSET *st = rrdinstance_acquired_rrdset(ria);
+ if(st) {
+ netdata_rwlock_rdlock(&st->alerts.rwlock);
+ if(st->alerts.base) {
+ buffer_json_member_add_object(wb, "alerts");
+ for(RRDCALC *rc = st->alerts.base; rc ;rc = rc->next) {
+ if(rc->status < RRDCALC_STATUS_CLEAR)
+ continue;
+
+ buffer_json_member_add_object(wb, string2str(rc->name));
+ buffer_json_member_add_string(wb, "st", rrdcalc_status2string(rc->status));
+ buffer_json_member_add_double(wb, "vl", rc->value);
+ buffer_json_member_add_string(wb, "un", string2str(rc->units));
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ netdata_rwlock_unlock(&st->alerts.rwlock);
+ }
+}
+
+static void query_target_combined_units_v2(BUFFER *wb, QUERY_TARGET *qt, size_t contexts, bool ignore_percentage) {
+ if(!ignore_percentage && query_target_has_percentage_units(qt)) {
+ buffer_json_member_add_string(wb, "units", "%");
+ }
+ else if(contexts == 1) {
+ buffer_json_member_add_string(wb, "units", rrdcontext_acquired_units(qt->contexts.array[0].rca));
+ }
+ else if(contexts > 1) {
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for(size_t c = 0; c < qt->contexts.used ;c++)
+ dictionary_set(dict, rrdcontext_acquired_units(qt->contexts.array[c].rca), NULL, 0);
+
+ if(dictionary_entries(dict) == 1)
+ buffer_json_member_add_string(wb, "units", rrdcontext_acquired_units(qt->contexts.array[0].rca));
+ else {
+ buffer_json_member_add_array(wb, "units");
+ const char *s;
+ dfe_start_read(dict, s)
+ buffer_json_add_array_item_string(wb, s_dfe.name);
+ dfe_done(s);
+ buffer_json_array_close(wb);
+ }
+ dictionary_destroy(dict);
+ }
+}
- buffer_sprintf(wb, ",\n %sanomaly_rates%s: ", kq, kq);
+static void query_target_combined_chart_type(BUFFER *wb, QUERY_TARGET *qt, size_t contexts) {
+ if(contexts >= 1)
+ buffer_json_member_add_string(wb, "chart_type", rrdset_type_name(rrdcontext_acquired_chart_type(qt->contexts.array[0].rca)));
}
-void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) {
- (void)format;
+static void rrdr_grouped_by_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options __maybe_unused) {
+ QUERY_TARGET *qt = r->internal.qt;
- char kq[2] = "", // key quote
- sq[2] = ""; // string quote
+ buffer_json_member_add_array(wb, key);
- if( options & RRDR_OPTION_GOOGLE_JSON ) {
+    // find the deepest group-by pass
+ ssize_t g = 0;
+ for(g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
+ break;
+ }
+
+ if(g > 0)
+ g--;
+
+ RRDR_GROUP_BY group_by = qt->request.group_by[g].group_by;
+
+ if(group_by & RRDR_GROUP_BY_SELECTED)
+ buffer_json_add_array_item_string(wb, "selected");
+
+ else if(group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ buffer_json_add_array_item_string(wb, "percentage-of-instance");
+
+ else {
+
+ if(group_by & RRDR_GROUP_BY_DIMENSION)
+ buffer_json_add_array_item_string(wb, "dimension");
+
+ if(group_by & RRDR_GROUP_BY_INSTANCE)
+ buffer_json_add_array_item_string(wb, "instance");
+
+ if(group_by & RRDR_GROUP_BY_LABEL) {
+ BUFFER *b = buffer_create(0, NULL);
+ for (size_t l = 0; l < qt->group_by[g].used; l++) {
+ buffer_flush(b);
+ buffer_fast_strcat(b, "label:", 6);
+ buffer_strcat(b, qt->group_by[g].label_keys[l]);
+ buffer_json_add_array_item_string(wb, buffer_tostring(b));
+ }
+ buffer_free(b);
+ }
+
+ if(group_by & RRDR_GROUP_BY_NODE)
+ buffer_json_add_array_item_string(wb, "node");
+
+ if(group_by & RRDR_GROUP_BY_CONTEXT)
+ buffer_json_add_array_item_string(wb, "context");
+
+ if(group_by & RRDR_GROUP_BY_UNITS)
+ buffer_json_add_array_item_string(wb, "units");
+ }
+
+    buffer_json_array_close(wb); // the key array, e.g. "grouped_by"
+}
+
+static void rrdr_dimension_units_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options, bool ignore_percentage) {
+ if(!r->du)
+ return;
+
+ bool percentage = !ignore_percentage && query_target_has_percentage_units(r->internal.qt);
+
+ buffer_json_member_add_array(wb, key);
+ for(size_t c = 0; c < r->d ; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ if(percentage)
+ buffer_json_add_array_item_string(wb, "%");
+ else
+ buffer_json_add_array_item_string(wb, string2str(r->du[c]));
+ }
+ buffer_json_array_close(wb);
+}
+
+static void rrdr_dimension_priority_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ if(!r->dp)
+ return;
+
+ buffer_json_member_add_array(wb, key);
+ for(size_t c = 0; c < r->d ; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ buffer_json_add_array_item_uint64(wb, r->dp[c]);
+ }
+ buffer_json_array_close(wb);
+}
+
+static void rrdr_dimension_aggregated_array_v2(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ if(!r->dgbc)
+ return;
+
+ buffer_json_member_add_array(wb, key);
+ for(size_t c = 0; c < r->d ;c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
+
+ buffer_json_add_array_item_uint64(wb, r->dgbc[c]);
+ }
+ buffer_json_array_close(wb);
+}
+
+static void query_target_title(BUFFER *wb, QUERY_TARGET *qt, size_t contexts) {
+ if(contexts == 1) {
+ buffer_json_member_add_string(wb, "title", rrdcontext_acquired_title(qt->contexts.array[0].rca));
+ }
+ else if(contexts > 1) {
+ BUFFER *t = buffer_create(0, NULL);
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+
+ buffer_strcat(t, "Chart for contexts: ");
+
+ size_t added = 0;
+ for(size_t c = 0; c < qt->contexts.used ;c++) {
+ bool *set = dictionary_set(dict, rrdcontext_acquired_id(qt->contexts.array[c].rca), NULL, sizeof(*set));
+ if(!*set) {
+ *set = true;
+ if(added)
+ buffer_fast_strcat(t, ", ", 2);
+
+ buffer_strcat(t, rrdcontext_acquired_id(qt->contexts.array[c].rca));
+ added++;
+ }
+ }
+ buffer_json_member_add_string(wb, "title", buffer_tostring(t));
+ dictionary_destroy(dict);
+ buffer_free(t);
+ }
+}
+
+static void query_target_detailed_objects_tree(BUFFER *wb, RRDR *r, RRDR_OPTIONS options) {
+ QUERY_TARGET *qt = r->internal.qt;
+ buffer_json_member_add_object(wb, "nodes");
+
+ time_t now_s = now_realtime_sec();
+ RRDHOST *last_host = NULL;
+ RRDCONTEXT_ACQUIRED *last_rca = NULL;
+ RRDINSTANCE_ACQUIRED *last_ria = NULL;
+
+ size_t h = 0, c = 0, i = 0, m = 0, q = 0;
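+    // h, c, i, m and q are shared, forward-only cursors: the query target
+    // arrays appear to be ordered host -> context -> instance -> dimension,
+    // so each inner loop resumes where the previous host/context/instance
+    // left off instead of rescanning from the start.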
+ for(; h < qt->nodes.used ; h++) {
+ QUERY_NODE *qn = query_node(qt, h);
+ RRDHOST *host = qn->rrdhost;
+
+ for( ;c < qt->contexts.used ;c++) {
+ QUERY_CONTEXT *qc = query_context(qt, c);
+ RRDCONTEXT_ACQUIRED *rca = qc->rca;
+ if(!rrdcontext_acquired_belongs_to_host(rca, host)) break;
+
+ for( ;i < qt->instances.used ;i++) {
+ QUERY_INSTANCE *qi = query_instance(qt, i);
+ RRDINSTANCE_ACQUIRED *ria = qi->ria;
+ if(!rrdinstance_acquired_belongs_to_context(ria, rca)) break;
+
+ for( ; m < qt->dimensions.used ; m++) {
+ QUERY_DIMENSION *qd = query_dimension(qt, m);
+ RRDMETRIC_ACQUIRED *rma = qd->rma;
+ if(!rrdmetric_acquired_belongs_to_instance(rma, ria)) break;
+
+ QUERY_METRIC *qm = NULL;
+ bool queried = false;
+ for( ; q < qt->query.used ;q++) {
+ QUERY_METRIC *tqm = query_metric(qt, q);
+ QUERY_DIMENSION *tqd = query_dimension(qt, tqm->link.query_dimension_id);
+ if(tqd->rma != rma) break;
+
+ queried = tqm->status & RRDR_DIMENSION_QUERIED;
+ qm = tqm;
+ }
+
+                    if(!queried && !(options & RRDR_OPTION_ALL_DIMENSIONS))
+ continue;
+
+ if(host != last_host) {
+ if(last_host) {
+ if(last_rca) {
+ if(last_ria) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // instance
+ last_ria = NULL;
+ }
+ buffer_json_object_close(wb); // instances
+ buffer_json_object_close(wb); // context
+ last_rca = NULL;
+ }
+ buffer_json_object_close(wb); // contexts
+ buffer_json_object_close(wb); // host
+ last_host = NULL;
+ }
+
+ buffer_json_member_add_object(wb, host->machine_guid);
+ if(qn->node_id[0])
+ buffer_json_member_add_string(wb, "nd", qn->node_id);
+ buffer_json_member_add_uint64(wb, "ni", qn->slot);
+ buffer_json_member_add_string(wb, "nm", rrdhost_hostname(host));
+ buffer_json_member_add_object(wb, "contexts");
+
+ last_host = host;
+ }
+
+ if(rca != last_rca) {
+ if(last_rca) {
+ if(last_ria) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // instance
+ last_ria = NULL;
+ }
+ buffer_json_object_close(wb); // instances
+ buffer_json_object_close(wb); // context
+ last_rca = NULL;
+ }
+
+ buffer_json_member_add_object(wb, rrdcontext_acquired_id(rca));
+ buffer_json_member_add_object(wb, "instances");
+
+ last_rca = rca;
+ }
+
+ if(ria != last_ria) {
+ if(last_ria) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // instance
+ last_ria = NULL;
+ }
+
+ buffer_json_member_add_object(wb, rrdinstance_acquired_id(ria));
+ buffer_json_member_add_string(wb, "nm", rrdinstance_acquired_name(ria));
+ buffer_json_member_add_time_t(wb, "ue", rrdinstance_acquired_update_every(ria));
+ DICTIONARY *labels = rrdinstance_acquired_labels(ria);
+ if(labels) {
+ buffer_json_member_add_object(wb, "labels");
+ rrdlabels_to_buffer_json_members(labels, wb);
+ buffer_json_object_close(wb);
+ }
+ rrdset_rrdcalc_entries_v2(wb, ria);
+ buffer_json_member_add_object(wb, "dimensions");
+
+ last_ria = ria;
+ }
+
+ buffer_json_member_add_object(wb, rrdmetric_acquired_id(rma));
+ {
+ buffer_json_member_add_string(wb, "nm", rrdmetric_acquired_name(rma));
+ buffer_json_member_add_uint64(wb, "qr", queried ? 1 : 0);
+ time_t first_entry_s = rrdmetric_acquired_first_entry(rma);
+ time_t last_entry_s = rrdmetric_acquired_last_entry(rma);
+ buffer_json_member_add_time_t(wb, "fe", first_entry_s);
+ buffer_json_member_add_time_t(wb, "le", last_entry_s ? last_entry_s : now_s);
+
+ if(qm) {
+ if(qm->status & RRDR_DIMENSION_GROUPED) {
+ // buffer_json_member_add_string(wb, "grouped_as_id", string2str(qm->grouped_as.id));
+ buffer_json_member_add_string(wb, "as", string2str(qm->grouped_as.name));
+ }
+
+ query_target_points_statistics(wb, qt, &qm->query_points);
+
+ if(options & RRDR_OPTION_DEBUG)
+ jsonwrap_query_metric_plan(wb, qm);
+ }
+ }
+ buffer_json_object_close(wb); // metric
+ }
+ }
+ }
+ }
+
+ if(last_host) {
+ if(last_rca) {
+ if(last_ria) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // instance
+ last_ria = NULL;
+ }
+ buffer_json_object_close(wb); // instances
+ buffer_json_object_close(wb); // context
+ last_rca = NULL;
+ }
+ buffer_json_object_close(wb); // contexts
+ buffer_json_object_close(wb); // host
+ last_host = NULL;
+ }
+ buffer_json_object_close(wb); // hosts
+}
+
+void version_hashes_api_v2(BUFFER *wb, struct query_versions *versions) {
+ buffer_json_member_add_object(wb, "versions");
+ buffer_json_member_add_uint64(wb, "nodes_hard_hash", dictionary_version(rrdhost_root_index));
+ buffer_json_member_add_uint64(wb, "contexts_hard_hash", versions->contexts_hard_hash);
+ buffer_json_member_add_uint64(wb, "contexts_soft_hash", versions->contexts_soft_hash);
+ buffer_json_member_add_uint64(wb, "alerts_hard_hash", versions->alerts_hard_hash);
+ buffer_json_member_add_uint64(wb, "alerts_soft_hash", versions->alerts_soft_hash);
+ buffer_json_object_close(wb);
+}
+
+void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb) {
+ QUERY_TARGET *qt = r->internal.qt;
+ RRDR_OPTIONS options = qt->window.options;
+
+ char kq[2] = "\"", // key quote
+ sq[2] = "\""; // string quote
+
+ if(unlikely(options & RRDR_OPTION_GOOGLE_JSON)) {
kq[0] = '\0';
sq[0] = '\'';
}
- else {
- kq[0] = '"';
- sq[0] = '"';
+
+ buffer_json_initialize(wb, kq, sq, 0, true, options & RRDR_OPTION_MINIFY);
+ buffer_json_member_add_uint64(wb, "api", 2);
+
+ if(options & RRDR_OPTION_DEBUG) {
+ buffer_json_member_add_string(wb, "id", qt->id);
+ buffer_json_member_add_object(wb, "request");
+ {
+ buffer_json_member_add_string(wb, "format", rrdr_format_to_string(qt->request.format));
+ web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", qt->request.options);
+
+ buffer_json_member_add_object(wb, "scope");
+ buffer_json_member_add_string(wb, "scope_nodes", qt->request.scope_nodes);
+ buffer_json_member_add_string(wb, "scope_contexts", qt->request.scope_contexts);
+ buffer_json_object_close(wb); // scope
+
+ buffer_json_member_add_object(wb, "selectors");
+ if (qt->request.host)
+ buffer_json_member_add_string(wb, "nodes", rrdhost_hostname(qt->request.host));
+ else
+ buffer_json_member_add_string(wb, "nodes", qt->request.nodes);
+ buffer_json_member_add_string(wb, "contexts", qt->request.contexts);
+ buffer_json_member_add_string(wb, "instances", qt->request.instances);
+ buffer_json_member_add_string(wb, "dimensions", qt->request.dimensions);
+ buffer_json_member_add_string(wb, "labels", qt->request.labels);
+ buffer_json_member_add_string(wb, "alerts", qt->request.alerts);
+ buffer_json_object_close(wb); // selectors
+
+ buffer_json_member_add_object(wb, "window");
+ buffer_json_member_add_time_t(wb, "after", qt->request.after);
+ buffer_json_member_add_time_t(wb, "before", qt->request.before);
+ buffer_json_member_add_uint64(wb, "points", qt->request.points);
+ if (qt->request.options & RRDR_OPTION_SELECTED_TIER)
+ buffer_json_member_add_uint64(wb, "tier", qt->request.tier);
+ else
+ buffer_json_member_add_string(wb, "tier", NULL);
+ buffer_json_object_close(wb); // window
+
+ buffer_json_member_add_object(wb, "aggregations");
+ {
+ buffer_json_member_add_object(wb, "time");
+ buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(qt->request.time_group_method));
+ buffer_json_member_add_string(wb, "time_group_options", qt->request.time_group_options);
+ if (qt->request.resampling_time > 0)
+ buffer_json_member_add_time_t(wb, "time_resampling", qt->request.resampling_time);
+ else
+ buffer_json_member_add_string(wb, "time_resampling", NULL);
+ buffer_json_object_close(wb); // time
+
+ buffer_json_member_add_array(wb, "metrics");
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
+ break;
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_array(wb, "group_by");
+ buffer_json_group_by_to_array(wb, qt->request.group_by[g].group_by);
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "group_by_label");
+ for (size_t l = 0; l < qt->group_by[g].used; l++)
+ buffer_json_add_array_item_string(wb, qt->group_by[g].label_keys[l]);
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_string(
+ wb, "aggregation",group_by_aggregate_function_to_string(qt->request.group_by[g].aggregation));
+ }
+ buffer_json_object_close(wb);
+ }
+            buffer_json_array_close(wb); // metrics
+ }
+ buffer_json_object_close(wb); // aggregations
+
+ buffer_json_member_add_uint64(wb, "timeout", qt->request.timeout_ms);
+ }
+ buffer_json_object_close(wb); // request
+ }
+
+ version_hashes_api_v2(wb, &qt->versions);
+
+ buffer_json_member_add_object(wb, "summary");
+ struct summary_total_counts
+ nodes_totals = { 0 },
+ contexts_totals = { 0 },
+ instances_totals = { 0 },
+ metrics_totals = { 0 },
+ label_key_totals = { 0 },
+ label_key_value_totals = { 0 };
+ {
+ query_target_summary_nodes_v2(wb, qt, "nodes", &nodes_totals);
+ r->internal.contexts = query_target_summary_contexts_v2(wb, qt, "contexts", &contexts_totals);
+ query_target_summary_instances_v2(wb, qt, "instances", &instances_totals);
+ query_target_summary_dimensions_v12(wb, qt, "dimensions", true, &metrics_totals);
+ query_target_summary_labels_v12(wb, qt, "labels", true, &label_key_totals, &label_key_value_totals);
+ query_target_summary_alerts_v2(wb, qt, "alerts");
+ }
+ if(query_target_aggregatable(qt)) {
+ buffer_json_member_add_object(wb, "globals");
+ query_target_points_statistics(wb, qt, &qt->query_points);
+ buffer_json_object_close(wb); // globals
+ }
+ buffer_json_object_close(wb); // summary
+
+ buffer_json_member_add_object(wb, "totals");
+ query_target_total_counts(wb, "nodes", &nodes_totals);
+ query_target_total_counts(wb, "contexts", &contexts_totals);
+ query_target_total_counts(wb, "instances", &instances_totals);
+ query_target_total_counts(wb, "dimensions", &metrics_totals);
+ query_target_total_counts(wb, "label_keys", &label_key_totals);
+ query_target_total_counts(wb, "label_key_values", &label_key_value_totals);
+ buffer_json_object_close(wb); // totals
+
+ if(options & RRDR_OPTION_SHOW_DETAILS) {
+ buffer_json_member_add_object(wb, "detailed");
+ query_target_detailed_objects_tree(wb, r, options);
+ buffer_json_object_close(wb); // detailed
+ }
+
+ query_target_functions(wb, "functions", r);
+}
+
+//static void annotations_range_for_value_flags(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format __maybe_unused, RRDR_OPTIONS options, RRDR_VALUE_FLAGS flags, const char *type) {
+// const size_t dims = r->d, rows = r->rows;
+// size_t next_d_idx = 0;
+// for(size_t d = 0; d < dims ; d++) {
+// if(!rrdr_dimension_should_be_exposed(r->od[d], options))
+// continue;
+//
+// size_t d_idx = next_d_idx++;
+//
+// size_t t = 0;
+// while(t < rows) {
+//
+// // find the beginning
+// time_t started = 0;
+// for(; t < rows ;t++) {
+// RRDR_VALUE_FLAGS o = r->o[t * r->d + d];
+// if(o & flags) {
+// started = r->t[t];
+// break;
+// }
+// }
+//
+// if(started) {
+// time_t ended = 0;
+// for(; t < rows ;t++) {
+// RRDR_VALUE_FLAGS o = r->o[t * r->d + d];
+// if(!(o & flags)) {
+// ended = r->t[t];
+// break;
+// }
+// }
+//
+// if(!ended)
+// ended = r->t[rows - 1];
+//
+// buffer_json_add_array_item_object(wb);
+// buffer_json_member_add_string(wb, "t", type);
+// // buffer_json_member_add_string(wb, "d", string2str(r->dn[d]));
+// buffer_json_member_add_uint64(wb, "d", d_idx);
+// if(started == ended) {
+// if(options & RRDR_OPTION_MILLISECONDS)
+// buffer_json_member_add_time_t2ms(wb, "x", started);
+// else
+// buffer_json_member_add_time_t(wb, "x", started);
+// }
+// else {
+// buffer_json_member_add_array(wb, "x");
+// if(options & RRDR_OPTION_MILLISECONDS) {
+// buffer_json_add_array_item_time_t2ms(wb, started);
+// buffer_json_add_array_item_time_t2ms(wb, ended);
+// }
+// else {
+// buffer_json_add_array_item_time_t(wb, started);
+// buffer_json_add_array_item_time_t(wb, ended);
+// }
+// buffer_json_array_close(wb);
+// }
+// buffer_json_object_close(wb);
+// }
+// }
+// }
+//}
+//
+//void rrdr_json_wrapper_annotations(RRDR *r, BUFFER *wb, DATASOURCE_FORMAT format __maybe_unused, RRDR_OPTIONS options) {
+// buffer_json_member_add_array(wb, "annotations");
+//
+// annotations_range_for_value_flags(r, wb, format, options, RRDR_VALUE_EMPTY, "G"); // Gap
+// annotations_range_for_value_flags(r, wb, format, options, RRDR_VALUE_RESET, "O"); // Overflow
+// annotations_range_for_value_flags(r, wb, format, options, RRDR_VALUE_PARTIAL, "P"); // Partial
+//
+// buffer_json_array_close(wb); // annotations
+//}
+
+void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb) {
+ buffer_json_member_add_double(wb, "min", r->view.min);
+ buffer_json_member_add_double(wb, "max", r->view.max);
+
+ buffer_json_query_timings(wb, "timings", &r->internal.qt->timings);
+ buffer_json_finalize(wb);
+}
+
+void rrdr_json_wrapper_end2(RRDR *r, BUFFER *wb) {
+ QUERY_TARGET *qt = r->internal.qt;
+ DATASOURCE_FORMAT format = qt->request.format;
+ RRDR_OPTIONS options = qt->window.options;
+
+ buffer_json_member_add_object(wb, "db");
+ {
+ buffer_json_member_add_uint64(wb, "tiers", storage_tiers);
+ buffer_json_member_add_time_t(wb, "update_every", qt->db.minimum_latest_update_every_s);
+ buffer_json_member_add_time_t(wb, "first_entry", qt->db.first_time_s);
+ buffer_json_member_add_time_t(wb, "last_entry", qt->db.last_time_s);
+
+ query_target_combined_units_v2(wb, qt, r->internal.contexts, true);
+ buffer_json_member_add_object(wb, "dimensions");
+ {
+ rrdr_dimension_ids(wb, "ids", r, options);
+ rrdr_dimension_units_array_v2(wb, "units", r, options, true);
+ rrdr_dimension_query_points_statistics(wb, "sts", r, options, false);
+ }
+ buffer_json_object_close(wb); // dimensions
+
+ buffer_json_member_add_array(wb, "per_tier");
+ for(size_t tier = 0; tier < storage_tiers ; tier++) {
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_uint64(wb, "tier", tier);
+ buffer_json_member_add_uint64(wb, "queries", qt->db.tiers[tier].queries);
+ buffer_json_member_add_uint64(wb, "points", qt->db.tiers[tier].points);
+ buffer_json_member_add_time_t(wb, "update_every", qt->db.tiers[tier].update_every);
+ buffer_json_member_add_time_t(wb, "first_entry", qt->db.tiers[tier].retention.first_time_s);
+ buffer_json_member_add_time_t(wb, "last_entry", qt->db.tiers[tier].retention.last_time_s);
+ buffer_json_object_close(wb);
+ }
+ buffer_json_array_close(wb);
}
+ buffer_json_object_close(wb);
- if(string_value) buffer_strcat(wb, sq);
+ buffer_json_member_add_object(wb, "view");
+ {
+ query_target_title(wb, qt, r->internal.contexts);
+ buffer_json_member_add_time_t(wb, "update_every", r->view.update_every);
+ buffer_json_member_add_time_t(wb, "after", r->view.after);
+ buffer_json_member_add_time_t(wb, "before", r->view.before);
+
+ if(options & RRDR_OPTION_DEBUG) {
+ buffer_json_member_add_string(wb, "format", rrdr_format_to_string(format));
+ web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+ buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(qt->request.time_group_method));
+ }
+
+ if(options & RRDR_OPTION_DEBUG) {
+ buffer_json_member_add_object(wb, "partial_data_trimming");
+ buffer_json_member_add_time_t(wb, "max_update_every", r->partial_data_trimming.max_update_every);
+ buffer_json_member_add_time_t(wb, "expected_after", r->partial_data_trimming.expected_after);
+ buffer_json_member_add_time_t(wb, "trimmed_after", r->partial_data_trimming.trimmed_after);
+ buffer_json_object_close(wb);
+ }
+
+ if(options & RRDR_OPTION_RETURN_RAW)
+ buffer_json_member_add_uint64(wb, "points", rrdr_rows(r));
+
+ query_target_combined_units_v2(wb, qt, r->internal.contexts, false);
+ query_target_combined_chart_type(wb, qt, r->internal.contexts);
+ buffer_json_member_add_object(wb, "dimensions");
+ {
+ rrdr_grouped_by_array_v2(wb, "grouped_by", r, options);
+ rrdr_dimension_ids(wb, "ids", r, options);
+ rrdr_dimension_names(wb, "names", r, options);
+ rrdr_dimension_units_array_v2(wb, "units", r, options, false);
+ rrdr_dimension_priority_array_v2(wb, "priorities", r, options);
+ rrdr_dimension_aggregated_array_v2(wb, "aggregated", r, options);
+ rrdr_dimension_query_points_statistics(wb, "sts", r, options, true);
+ rrdr_json_group_by_labels(wb, "labels", r, options);
+ }
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_double(wb, "min", r->view.min);
+ buffer_json_member_add_double(wb, "max", r->view.max);
+ }
+ buffer_json_object_close(wb); // view
- buffer_sprintf(wb, ",\n %smin%s: ", kq, kq);
- buffer_rrd_value(wb, r->min);
- buffer_sprintf(wb, ",\n %smax%s: ", kq, kq);
- buffer_rrd_value(wb, r->max);
- buffer_strcat(wb, "\n}\n");
+ buffer_json_agents_array_v2(wb, &r->internal.qt->timings, 0);
+ buffer_json_cloud_timings(wb, "timings", &r->internal.qt->timings);
+ buffer_json_finalize(wb);
}
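
Note: the wrappers above lean entirely on the new buffer_json_* builder, which tracks nesting depth, quoting and comma placement internally. A minimal sketch of the calling convention, using only signatures that appear in this patch (the fourth and fifth arguments to buffer_json_initialize() are taken here to be the starting depth and whether to open a root object):

    BUFFER *wb = buffer_create(0, NULL);
    buffer_json_initialize(wb, "\"", "\"", 0, true, false); // key quote, string quote, depth, open root, minify

    buffer_json_member_add_uint64(wb, "api", 2);
    buffer_json_member_add_object(wb, "view");
    {
        buffer_json_member_add_string(wb, "title", "example");
        buffer_json_member_add_array(wb, "units");
        buffer_json_add_array_item_string(wb, "%");
        buffer_json_array_close(wb);                        // units
    }
    buffer_json_object_close(wb);                           // view

    buffer_json_finalize(wb);                               // closes the root object
    // buffer_tostring(wb) => {"api":2,"view":{"title":"example","units":["%"]}}
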
diff --git a/web/api/formatters/json_wrapper.h b/web/api/formatters/json_wrapper.h
index 91c1475c5..a702f3a5c 100644
--- a/web/api/formatters/json_wrapper.h
+++ b/web/api/formatters/json_wrapper.h
@@ -6,10 +6,16 @@
#include "rrd2json.h"
#include "web/api/queries/query.h"
+typedef void (*wrapper_begin_t)(RRDR *r, BUFFER *wb);
+typedef void (*wrapper_end_t)(RRDR *r, BUFFER *wb);
-void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
- RRDR_GROUPING group_method);
-void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
-void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
+void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb);
+void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb);
+
+void rrdr_json_wrapper_begin2(RRDR *r, BUFFER *wb);
+void rrdr_json_wrapper_end2(RRDR *r, BUFFER *wb);
+
+struct query_versions;
+void version_hashes_api_v2(BUFFER *wb, struct query_versions *versions);
#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H
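
Note: the wrapper_begin_t/wrapper_end_t typedefs let the data query path select a v1 or v2 envelope once and call it through pointers. A sketch of the intended use, mirroring data_query_execute() in rrd2json.c below:

    wrapper_begin_t wrapper_begin = rrdr_json_wrapper_begin;
    wrapper_end_t   wrapper_end   = rrdr_json_wrapper_end;

    if(qt->request.version == 2) {
        wrapper_begin = rrdr_json_wrapper_begin2;
        wrapper_end   = rrdr_json_wrapper_end2;
    }

    wrapper_begin(r, wb);   // opens the envelope for the negotiated version
    // ... format-specific "result" payload ...
    wrapper_end(r, wb);     // min/max, timings, buffer_json_finalize()
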
diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c
index 64cde5b2b..139fa6ec8 100644
--- a/web/api/formatters/rrd2json.c
+++ b/web/api/formatters/rrd2json.c
@@ -7,63 +7,55 @@ void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) {
rrdset2json(st, wb, NULL, NULL, 0);
}
-void rrdr_buffer_print_format(BUFFER *wb, uint32_t format) {
+const char *rrdr_format_to_string(DATASOURCE_FORMAT format) {
switch(format) {
case DATASOURCE_JSON:
- buffer_strcat(wb, DATASOURCE_FORMAT_JSON);
- break;
+ return DATASOURCE_FORMAT_JSON;
+
+ case DATASOURCE_JSON2:
+ return DATASOURCE_FORMAT_JSON2;
case DATASOURCE_DATATABLE_JSON:
- buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSON);
- break;
+ return DATASOURCE_FORMAT_DATATABLE_JSON;
case DATASOURCE_DATATABLE_JSONP:
- buffer_strcat(wb, DATASOURCE_FORMAT_DATATABLE_JSONP);
- break;
+ return DATASOURCE_FORMAT_DATATABLE_JSONP;
case DATASOURCE_JSONP:
- buffer_strcat(wb, DATASOURCE_FORMAT_JSONP);
- break;
+ return DATASOURCE_FORMAT_JSONP;
case DATASOURCE_SSV:
- buffer_strcat(wb, DATASOURCE_FORMAT_SSV);
- break;
+ return DATASOURCE_FORMAT_SSV;
case DATASOURCE_CSV:
- buffer_strcat(wb, DATASOURCE_FORMAT_CSV);
- break;
+ return DATASOURCE_FORMAT_CSV;
case DATASOURCE_TSV:
- buffer_strcat(wb, DATASOURCE_FORMAT_TSV);
- break;
+ return DATASOURCE_FORMAT_TSV;
case DATASOURCE_HTML:
- buffer_strcat(wb, DATASOURCE_FORMAT_HTML);
- break;
+ return DATASOURCE_FORMAT_HTML;
case DATASOURCE_JS_ARRAY:
- buffer_strcat(wb, DATASOURCE_FORMAT_JS_ARRAY);
- break;
+ return DATASOURCE_FORMAT_JS_ARRAY;
case DATASOURCE_SSV_COMMA:
- buffer_strcat(wb, DATASOURCE_FORMAT_SSV_COMMA);
- break;
+ return DATASOURCE_FORMAT_SSV_COMMA;
default:
- buffer_strcat(wb, "unknown");
- break;
+ return "unknown";
}
}
int rrdset2value_api_v1(
- RRDSET *st
+ RRDSET *st
, BUFFER *wb
, NETDATA_DOUBLE *n
, const char *dimensions
, size_t points
, time_t after
, time_t before
- , RRDR_GROUPING group_method
+ , RRDR_TIME_GROUPING group_method
, const char *group_options
, time_t resampling_time
, uint32_t options
@@ -105,15 +97,15 @@ int rrdset2value_api_v1(
}
if(db_points_read)
- *db_points_read += r->internal.db_points_read;
+ *db_points_read += r->stats.db_points_read;
if(db_points_per_tier) {
for(size_t t = 0; t < storage_tiers ;t++)
- db_points_per_tier[t] += r->internal.tier_points_read[t];
+ db_points_per_tier[t] += r->internal.qt->db.tiers[t].points;
}
if(result_points_generated)
- *result_points_generated += r->internal.result_points_generated;
+ *result_points_generated += r->stats.result_points_generated;
if(rrdr_rows(r) == 0) {
if(db_after) *db_after = 0;
@@ -125,14 +117,14 @@ int rrdset2value_api_v1(
}
if(wb) {
- if (r->result_options & RRDR_RESULT_OPTION_RELATIVE)
+ if (r->view.flags & RRDR_RESULT_FLAG_RELATIVE)
buffer_no_cacheable(wb);
- else if (r->result_options & RRDR_RESULT_OPTION_ABSOLUTE)
+ else if (r->view.flags & RRDR_RESULT_FLAG_ABSOLUTE)
buffer_cacheable(wb);
}
- if(db_after) *db_after = r->after;
- if(db_before) *db_before = r->before;
+ if(db_after) *db_after = r->view.after;
+ if(db_before) *db_before = r->view.before;
long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0;
*n = rrdr2value(r, i, options, value_is_null, anomaly_rate);
@@ -144,108 +136,141 @@ cleanup:
return ret;
}
+static inline void buffer_json_member_add_key_only(BUFFER *wb, const char *key) {
+ buffer_print_json_comma_newline_spacing(wb);
+ buffer_print_json_key(wb, key);
+ buffer_fast_strcat(wb, ":", 1);
+ wb->json.stack[wb->json.depth].count++;
+}
+
+static inline void buffer_json_member_add_string_open(BUFFER *wb, const char *key) {
+ buffer_json_member_add_key_only(wb, key);
+ buffer_strcat(wb, wb->json.value_quote);
+}
+
+static inline void buffer_json_member_add_string_close(BUFFER *wb) {
+ buffer_strcat(wb, wb->json.value_quote);
+}
+
int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *latest_timestamp) {
+ wrapper_begin_t wrapper_begin = rrdr_json_wrapper_begin;
+ wrapper_end_t wrapper_end = rrdr_json_wrapper_end;
+
+ if(qt->request.version == 2) {
+ wrapper_begin = rrdr_json_wrapper_begin2;
+ wrapper_end = rrdr_json_wrapper_end2;
+ }
RRDR *r = rrd2rrdr(owa, qt);
+
if(!r) {
buffer_strcat(wb, "Cannot generate output with these parameters on this chart.");
return HTTP_RESP_INTERNAL_SERVER_ERROR;
}
- if (r->result_options & RRDR_RESULT_OPTION_CANCEL) {
+ if (r->view.flags & RRDR_RESULT_FLAG_CANCEL) {
rrdr_free(owa, r);
return HTTP_RESP_BACKEND_FETCH_FAILED;
}
- if(r->result_options & RRDR_RESULT_OPTION_RELATIVE)
+ if(r->view.flags & RRDR_RESULT_FLAG_RELATIVE)
buffer_no_cacheable(wb);
- else if(r->result_options & RRDR_RESULT_OPTION_ABSOLUTE)
+ else if(r->view.flags & RRDR_RESULT_FLAG_ABSOLUTE)
buffer_cacheable(wb);
if(latest_timestamp && rrdr_rows(r) > 0)
- *latest_timestamp = r->before;
+ *latest_timestamp = r->view.before;
DATASOURCE_FORMAT format = qt->request.format;
- RRDR_OPTIONS options = qt->request.options;
- RRDR_GROUPING group_method = qt->request.group_method;
+ RRDR_OPTIONS options = qt->window.options;
switch(format) {
case DATASOURCE_SSV:
if(options & RRDR_OPTION_JSON_WRAP) {
- wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ buffer_json_member_add_string_open(wb, "result");
rrdr2ssv(r, wb, options, "", " ", "");
- rrdr_json_wrapper_end(r, wb, format, options, 1);
+ buffer_json_member_add_string_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_TEXT_PLAIN;
+ wb->content_type = CT_TEXT_PLAIN;
rrdr2ssv(r, wb, options, "", " ", "");
}
break;
case DATASOURCE_SSV_COMMA:
if(options & RRDR_OPTION_JSON_WRAP) {
- wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ buffer_json_member_add_string_open(wb, "result");
rrdr2ssv(r, wb, options, "", ",", "");
- rrdr_json_wrapper_end(r, wb, format, options, 1);
+ buffer_json_member_add_string_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_TEXT_PLAIN;
+ wb->content_type = CT_TEXT_PLAIN;
rrdr2ssv(r, wb, options, "", ",", "");
}
break;
case DATASOURCE_JS_ARRAY:
if(options & RRDR_OPTION_JSON_WRAP) {
- wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
- rrdr2ssv(r, wb, options, "[", ",", "]");
- rrdr_json_wrapper_end(r, wb, format, options, 0);
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ buffer_json_member_add_array(wb, "result");
+ rrdr2ssv(r, wb, options, "", ",", "");
+ buffer_json_array_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
rrdr2ssv(r, wb, options, "[", ",", "]");
}
break;
case DATASOURCE_CSV:
if(options & RRDR_OPTION_JSON_WRAP) {
- wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ buffer_json_member_add_string_open(wb, "result");
rrdr2csv(r, wb, format, options, "", ",", "\\n", "");
- rrdr_json_wrapper_end(r, wb, format, options, 1);
+ buffer_json_member_add_string_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_TEXT_PLAIN;
+ wb->content_type = CT_TEXT_PLAIN;
rrdr2csv(r, wb, format, options, "", ",", "\r\n", "");
}
break;
case DATASOURCE_CSV_MARKDOWN:
if(options & RRDR_OPTION_JSON_WRAP) {
- wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ buffer_json_member_add_string_open(wb, "result");
rrdr2csv(r, wb, format, options, "", "|", "\\n", "");
- rrdr_json_wrapper_end(r, wb, format, options, 1);
+ buffer_json_member_add_string_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_TEXT_PLAIN;
+ wb->content_type = CT_TEXT_PLAIN;
rrdr2csv(r, wb, format, options, "", "|", "\r\n", "");
}
break;
case DATASOURCE_CSV_JSON_ARRAY:
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP) {
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
- buffer_strcat(wb, "[\n");
+ wrapper_begin(r, wb);
+ buffer_json_member_add_array(wb, "result");
rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n");
- buffer_strcat(wb, "\n]");
- rrdr_json_wrapper_end(r, wb, format, options, 0);
+ buffer_json_array_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
buffer_strcat(wb, "[\n");
rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n");
buffer_strcat(wb, "\n]");
@@ -254,28 +279,32 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
case DATASOURCE_TSV:
if(options & RRDR_OPTION_JSON_WRAP) {
- wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ buffer_json_member_add_string_open(wb, "result");
rrdr2csv(r, wb, format, options, "", "\t", "\\n", "");
- rrdr_json_wrapper_end(r, wb, format, options, 1);
+ buffer_json_member_add_string_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_TEXT_PLAIN;
+ wb->content_type = CT_TEXT_PLAIN;
rrdr2csv(r, wb, format, options, "", "\t", "\r\n", "");
}
break;
case DATASOURCE_HTML:
if(options & RRDR_OPTION_JSON_WRAP) {
- wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ buffer_json_member_add_string_open(wb, "result");
buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n");
rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", "");
buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n");
- rrdr_json_wrapper_end(r, wb, format, options, 1);
+ buffer_json_member_add_string_close(wb);
+ wrapper_end(r, wb);
}
else {
- wb->contenttype = CT_TEXT_HTML;
+ wb->content_type = CT_TEXT_HTML;
buffer_strcat(wb, "<html>\n<center>\n<table border=\"0\" cellpadding=\"5\" cellspacing=\"5\">\n");
rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\n", "");
buffer_strcat(wb, "</table>\n</center>\n</html>\n");
@@ -283,57 +312,75 @@ int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *l
break;
case DATASOURCE_DATATABLE_JSONP:
- wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
+ wb->content_type = CT_APPLICATION_X_JAVASCRIPT;
- if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wrapper_begin(r, wb);
+ buffer_json_member_add_key_only(wb, "result");
+ }
rrdr2json(r, wb, options, 1);
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_end(r, wb, format, options, 0);
+ wrapper_end(r, wb);
+
break;
case DATASOURCE_DATATABLE_JSON:
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
- if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wrapper_begin(r, wb);
+ buffer_json_member_add_key_only(wb, "result");
+ }
rrdr2json(r, wb, options, 1);
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_end(r, wb, format, options, 0);
+ wrapper_end(r, wb);
+
break;
case DATASOURCE_JSONP:
- wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
- if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+ wb->content_type = CT_APPLICATION_X_JAVASCRIPT;
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wrapper_begin(r, wb);
+ buffer_json_member_add_key_only(wb, "result");
+ }
rrdr2json(r, wb, options, 0);
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_end(r, wb, format, options, 0);
+ wrapper_end(r, wb);
+
break;
case DATASOURCE_JSON:
default:
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
- if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ wrapper_begin(r, wb);
+ buffer_json_member_add_key_only(wb, "result");
+ }
rrdr2json(r, wb, options, 0);
if(options & RRDR_OPTION_JSON_WRAP) {
- if(options & RRDR_OPTION_RETURN_JWAR) {
- rrdr_json_wrapper_anomaly_rates(r, wb, format, options, 0);
- rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, 0);
+ if (options & RRDR_OPTION_RETURN_JWAR) {
+ buffer_json_member_add_key_only(wb, "anomaly_rates");
+ rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, false);
}
- rrdr_json_wrapper_end(r, wb, format, options, 0);
+ wrapper_end(r, wb);
}
break;
+
+ case DATASOURCE_JSON2:
+ wb->content_type = CT_APPLICATION_JSON;
+ wrapper_begin(r, wb);
+ rrdr2json_v2(r, wb);
+ wrapper_end(r, wb);
+ break;
}
rrdr_free(owa, r);
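
Note how the JSON-wrapped non-JSON formats are emitted above: the payload is written verbatim between buffer_json_member_add_string_open() and _close(), so it becomes a single JSON string member; this is also why the wrapped CSV/HTML branches pass pre-escaped separators such as "\\n". The pattern in isolation:

    wrapper_begin(r, wb);
    buffer_json_member_add_string_open(wb, "result");  // emits: "result":"
    rrdr2ssv(r, wb, options, "", " ", "");             // payload written verbatim
    buffer_json_member_add_string_close(wb);           // emits the closing quote
    wrapper_end(r, wb);
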
diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h
index 88b9f773f..def26c754 100644
--- a/web/api/formatters/rrd2json.h
+++ b/web/api/formatters/rrd2json.h
@@ -3,6 +3,23 @@
#ifndef NETDATA_RRD2JSON_H
#define NETDATA_RRD2JSON_H 1
+// type of JSON generations
+typedef enum {
+ DATASOURCE_JSON = 0,
+ DATASOURCE_DATATABLE_JSON = 1,
+ DATASOURCE_DATATABLE_JSONP = 2,
+ DATASOURCE_SSV = 3,
+ DATASOURCE_CSV = 4,
+ DATASOURCE_JSONP = 5,
+ DATASOURCE_TSV = 6,
+ DATASOURCE_HTML = 7,
+ DATASOURCE_JS_ARRAY = 8,
+ DATASOURCE_SSV_COMMA = 9,
+ DATASOURCE_CSV_JSON_ARRAY = 10,
+ DATASOURCE_CSV_MARKDOWN = 11,
+ DATASOURCE_JSON2 = 12,
+} DATASOURCE_FORMAT;
+
#include "web/api/web_api_v1.h"
#include "web/api/exporters/allmetrics.h"
@@ -23,23 +40,8 @@
#define API_RELATIVE_TIME_MAX (3 * 365 * 86400)
-// type of JSON generations
-typedef enum {
- DATASOURCE_JSON = 0,
- DATASOURCE_DATATABLE_JSON = 1,
- DATASOURCE_DATATABLE_JSONP = 2,
- DATASOURCE_SSV = 3,
- DATASOURCE_CSV = 4,
- DATASOURCE_JSONP = 5,
- DATASOURCE_TSV = 6,
- DATASOURCE_HTML = 7,
- DATASOURCE_JS_ARRAY = 8,
- DATASOURCE_SSV_COMMA = 9,
- DATASOURCE_CSV_JSON_ARRAY = 10,
- DATASOURCE_CSV_MARKDOWN = 11,
-} DATASOURCE_FORMAT;
-
#define DATASOURCE_FORMAT_JSON "json"
+#define DATASOURCE_FORMAT_JSON2 "json2"
#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable"
#define DATASOURCE_FORMAT_DATATABLE_JSONP "datasource"
#define DATASOURCE_FORMAT_JSONP "jsonp"
@@ -53,19 +55,21 @@ typedef enum {
#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown"
void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb);
-void rrdr_buffer_print_format(BUFFER *wb, uint32_t format);
+const char *rrdr_format_to_string(DATASOURCE_FORMAT format);
int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, time_t *latest_timestamp);
+void rrdr_json_group_by_labels(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options);
+
int rrdset2value_api_v1(
- RRDSET *st
+ RRDSET *st
, BUFFER *wb
, NETDATA_DOUBLE *n
, const char *dimensions
, size_t points
, time_t after
, time_t before
- , RRDR_GROUPING group_method
+ , RRDR_TIME_GROUPING group_method
, const char *group_options
, time_t resampling_time
, uint32_t options
@@ -82,4 +86,15 @@ int rrdset2value_api_v1(
, STORAGE_PRIORITY priority
);
+static inline bool rrdr_dimension_should_be_exposed(RRDR_DIMENSION_FLAGS rrdr_dim_flags, RRDR_OPTIONS options) {
+ if(unlikely(options & RRDR_OPTION_RETURN_RAW))
+ return true;
+
+ if(unlikely(rrdr_dim_flags & RRDR_DIMENSION_HIDDEN)) return false;
+ if(unlikely(!(rrdr_dim_flags & RRDR_DIMENSION_QUERIED))) return false;
+ if(unlikely((options & RRDR_OPTION_NONZERO) && !(rrdr_dim_flags & RRDR_DIMENSION_NONZERO))) return false;
+
+ return true;
+}
+
#endif /* NETDATA_RRD2JSON_H */
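
Note: rrdr_dimension_should_be_exposed() folds the three per-dimension flag checks repeated throughout the old formatters into one helper. The typical filtering loop built on it, as used throughout json_wrapper.c above:

    size_t exposed = 0;
    for(size_t c = 0; c < r->d ; c++) {
        if(!rrdr_dimension_should_be_exposed(r->od[c], options))
            continue; // hidden, not queried, or zero while NONZERO is set
                      // (RETURN_RAW short-circuits all three checks)

        // ... emit one item for dimension c ...
        exposed++;
    }
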
diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c
index 449d4ddf5..156f4486b 100644
--- a/web/api/formatters/rrdset2json.c
+++ b/web/api/formatters/rrdset2json.c
@@ -96,9 +96,9 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
buffer_strcat(wb, ",\n\t\t\t\t\"");
else
buffer_strcat(wb, "\t\t\t\t\"");
- buffer_strcat_jsonescape(wb, rrddim_id(rd));
+ buffer_json_strcat(wb, rrddim_id(rd));
buffer_strcat(wb, "\": { \"name\": \"");
- buffer_strcat_jsonescape(wb, rrddim_name(rd));
+ buffer_json_strcat(wb, rrddim_name(rd));
buffer_strcat(wb, "\" }");
dimensions++;
@@ -112,9 +112,9 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
health_api_v1_chart_custom_variables2json(st, wb);
buffer_strcat(wb, ",\n\t\t\t\"green\": ");
- buffer_rrd_value(wb, st->green);
+ buffer_print_netdata_double(wb, st->green);
buffer_strcat(wb, ",\n\t\t\t\"red\": ");
- buffer_rrd_value(wb, st->red);
+ buffer_print_netdata_double(wb, st->red);
if (likely(!skip_volatile)) {
buffer_strcat(wb, ",\n\t\t\t\"alarms\": {\n");
diff --git a/web/api/formatters/ssv/README.md b/web/api/formatters/ssv/README.md
index 4ca2a64ca..434d56721 100644
--- a/web/api/formatters/ssv/README.md
+++ b/web/api/formatters/ssv/README.md
@@ -1,6 +1,10 @@
<!--
title: "SSV formatter"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/ssv/README.md
+sidebar_label: "SSV formatter"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Formatters"
-->
# SSV formatter
diff --git a/web/api/formatters/ssv/ssv.c b/web/api/formatters/ssv/ssv.c
index d561980d9..65de0464b 100644
--- a/web/api/formatters/ssv/ssv.c
+++ b/web/api/formatters/ssv/ssv.c
@@ -20,12 +20,12 @@ void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, con
NETDATA_DOUBLE v = rrdr2value(r, i, options, &all_values_are_null, NULL);
if(likely(i != start)) {
- if(r->min > v) r->min = v;
- if(r->max < v) r->max = v;
+ if(r->view.min > v) r->view.min = v;
+ if(r->view.max < v) r->view.max = v;
}
else {
- r->min = v;
- r->max = v;
+ r->view.min = v;
+ r->view.max = v;
}
if(likely(i != start))
@@ -38,7 +38,7 @@ void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, con
buffer_strcat(wb, "null");
}
else
- buffer_rrd_value(wb, v);
+ buffer_print_netdata_double(wb, v);
}
buffer_strcat(wb, suffix);
//info("RRD2SSV(): %s: END", r->st->id);
diff --git a/web/api/formatters/value/README.md b/web/api/formatters/value/README.md
index 5b75ded7c..5631d8207 100644
--- a/web/api/formatters/value/README.md
+++ b/web/api/formatters/value/README.md
@@ -1,6 +1,10 @@
<!--
title: "Value formatter"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/formatters/value/README.md
+sidebar_label: "Value formatter"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Formatters"
-->
# Value formatter
diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c
index fd9188057..1d07f62f6 100644
--- a/web/api/formatters/value/value.c
+++ b/web/api/formatters/value/value.c
@@ -4,9 +4,7 @@
inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate) {
- QUERY_TARGET *qt = r->internal.qt;
- long c;
- const long used = qt->query.used;
+ size_t c;
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
@@ -15,49 +13,15 @@ inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all
NETDATA_DOUBLE sum = 0, min = 0, max = 0, v;
int all_null = 1, init = 1;
- NETDATA_DOUBLE total = 1;
NETDATA_DOUBLE total_anomaly_rate = 0;
- int set_min_max = 0;
- if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
- total = 0;
- for (c = 0; c < used; c++) {
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- NETDATA_DOUBLE n = cn[c];
-
- if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
-
- total += n;
- }
- // prevent a division by zero
- if(total == 0) total = 1;
- set_min_max = 1;
- }
-
// for each dimension
- for (c = 0; c < used; c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if(unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
+ for (c = 0; c < r->d ; c++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[c], options))
+ continue;
NETDATA_DOUBLE n = cn[c];
- if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
- n = -n;
-
- if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
- n = n * 100 / total;
-
- if(unlikely(set_min_max)) {
- r->min = r->max = n;
- set_min_max = 0;
- }
-
- if(n < r->min) r->min = n;
- if(n > r->max) r->max = n;
- }
-
if(unlikely(init)) {
if(n > 0) {
min = 0;
@@ -107,10 +71,11 @@ inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all
QUERY_VALUE rrdmetric2value(RRDHOST *host,
struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma,
time_t after, time_t before,
- RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ RRDR_OPTIONS options, RRDR_TIME_GROUPING time_group_method, const char *time_group_options,
size_t tier, time_t timeout, QUERY_SOURCE query_source, STORAGE_PRIORITY priority
) {
QUERY_TARGET_REQUEST qtr = {
+ .version = 1,
.host = host,
.rca = rca,
.ria = ria,
@@ -119,16 +84,17 @@ QUERY_VALUE rrdmetric2value(RRDHOST *host,
.before = before,
.points = 1,
.options = options,
- .group_method = group_method,
- .group_options = group_options,
+ .time_group_method = time_group_method,
+ .time_group_options = time_group_options,
.tier = tier,
- .timeout = timeout,
+ .timeout_ms = timeout,
.query_source = query_source,
.priority = priority,
};
ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
- RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+ QUERY_TARGET *qt = query_target_create(&qtr);
+ RRDR *r = rrd2rrdr(owa, qt);
QUERY_VALUE qv;
@@ -136,18 +102,37 @@ QUERY_VALUE rrdmetric2value(RRDHOST *host,
qv = (QUERY_VALUE) {
.value = NAN,
.anomaly_rate = NAN,
+ .sp = {
+ .count = 0,
+ .min = NAN,
+ .max = NAN,
+ .sum = NAN,
+ .anomaly_count = 0,
+ },
+ .duration_ut = (r) ? r->internal.qt->timings.executed_ut - r->internal.qt->timings.received_ut : 0,
};
}
else {
qv = (QUERY_VALUE) {
- .after = r->after,
- .before = r->before,
- .points_read = r->internal.db_points_read,
- .result_points = r->internal.result_points_generated,
+ .after = r->view.after,
+ .before = r->view.before,
+ .points_read = r->stats.db_points_read,
+ .result_points = r->stats.result_points_generated,
+ .sp = {
+ .count = 0,
+ },
+ .duration_ut = r->internal.qt->timings.executed_ut - r->internal.qt->timings.received_ut,
};
+ for(size_t d = 0; d < r->internal.qt->query.used ;d++) {
+ if(!rrdr_dimension_should_be_exposed(r->internal.qt->query.array[d].status, options))
+ continue;
+
+ storage_point_merge_to(qv.sp, r->internal.qt->query.array[d].query_points);
+ }
+
for(size_t t = 0; t < storage_tiers ;t++)
- qv.storage_points_per_tier[t] = r->internal.tier_points_read[t];
+ qv.storage_points_per_tier[t] = r->internal.qt->db.tiers[t].points;
long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0;
int all_values_are_null = 0;
@@ -159,6 +144,7 @@ QUERY_VALUE rrdmetric2value(RRDHOST *host,
}
rrdr_free(owa, r);
+ query_target_release(qt);
onewayalloc_destroy(owa);
return qv;
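
The new loop above replaces ad-hoc min/max/sum bookkeeping with `storage_point_merge_to()`, folding each exposed dimension's `query_points` summary into `qv.sp`. A sketch of the merge semantics assumed here, using only the fields visible in the initializer above (`count`, `min`, `max`, `sum`, `anomaly_count`); the real helper may differ in detail:

    // hypothetical equivalent of storage_point_merge_to(dst, src)
    static inline void merge_storage_points_sketch(STORAGE_POINT *dst, STORAGE_POINT src) {
        if (!dst->count) {            // first contribution seeds the summary
            *dst = src;
            return;
        }
        if (src.count) {
            if (src.min < dst->min) dst->min = src.min;
            if (src.max > dst->max) dst->max = src.max;
            dst->sum           += src.sum;
            dst->count         += src.count;
            dst->anomaly_count += src.anomaly_count;
        }
    }
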
diff --git a/web/api/formatters/value/value.h b/web/api/formatters/value/value.h
index 3f7f51ccb..072ca14f8 100644
--- a/web/api/formatters/value/value.h
+++ b/web/api/formatters/value/value.h
@@ -13,6 +13,8 @@ typedef struct storage_value {
size_t points_read;
size_t storage_points_per_tier[RRD_STORAGE_TIERS];
size_t result_points;
+ STORAGE_POINT sp;
+ usec_t duration_ut;
} QUERY_VALUE;
struct rrdmetric_acquired;
@@ -22,7 +24,7 @@ struct rrdcontext_acquired;
QUERY_VALUE rrdmetric2value(RRDHOST *host,
struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma,
time_t after, time_t before,
- RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ RRDR_OPTIONS options, RRDR_TIME_GROUPING time_group_method, const char *time_group_options,
size_t tier, time_t timeout, QUERY_SOURCE query_source, STORAGE_PRIORITY priority
);
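
A hypothetical caller of the updated prototype, showing what the two new `QUERY_VALUE` fields carry: `sp` summarizes the storage points that produced the value, and `duration_ut` is the query duration in microseconds. All argument values below are placeholders; only the parameter order follows the prototype above, and the field types are assumptions, hence the casts:

    QUERY_VALUE qv = rrdmetric2value(host, rca, ria, rma,
                                     after, before, options,
                                     time_group_method, NULL,   // time group options
                                     0,                         // tier
                                     0,                         // timeout
                                     query_source, priority);

    if (qv.sp.count)
        fprintf(stderr, "value %f from %zu samples in %llu usec\n",
                (double)qv.value, (size_t)qv.sp.count,
                (unsigned long long)qv.duration_ut);
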
diff --git a/web/api/health/README.md b/web/api/health/README.md
index bfdd0ac68..dd46854a1 100644
--- a/web/api/health/README.md
+++ b/web/api/health/README.md
@@ -2,6 +2,10 @@
title: "Health API Calls"
date: 2020-04-27
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/health/README.md
+sidebar_label: "Health API Calls"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api"
-->
# Health API Calls
diff --git a/web/api/health/health_cmdapi.c b/web/api/health/health_cmdapi.c
index 7a939bc0f..7c4869bd3 100644
--- a/web/api/health/health_cmdapi.c
+++ b/web/api/health/health_cmdapi.c
@@ -121,7 +121,7 @@ int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, c
BUFFER *wb = w->response.data;
buffer_flush(wb);
- wb->contenttype = CT_TEXT_PLAIN;
+ wb->content_type = CT_TEXT_PLAIN;
buffer_flush(w->response.data);
@@ -139,10 +139,10 @@ int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, c
ret = HTTP_RESP_FORBIDDEN;
} else {
while (url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if (!value || !*value) continue;
- char *key = mystrsep(&value, "=");
+ char *key = strsep_skip_consecutive_separators(&value, "=");
if (!key || !*key) continue;
if (!value || !*value) continue;
@@ -171,7 +171,7 @@ int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, c
silencers->silencers = NULL;
buffer_strcat(wb, HEALTH_CMDAPI_MSG_RESET);
} else if (!strcmp(value, HEALTH_CMDAPI_CMD_LIST)) {
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
health_silencers2json(wb);
config_changed=0;
}
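
`strsep_skip_consecutive_separators()` replaces `mystrsep()` throughout: the URL is split into `&`-separated pairs, and each pair into a `key=value` couple. A self-contained sketch of the same parsing loop using the standard `strsep()`; the netdata helper additionally swallows runs of consecutive separators, which the `if (!*value)` style checks above otherwise have to catch:

    #include <stdio.h>
    #include <string.h>

    // parse "a=1&b=2" style query strings, mutating the input in place
    static void parse_query(char *url) {
        char *pair;
        while ((pair = strsep(&url, "&")) != NULL) {
            if (!*pair) continue;               // empty token from "&&"
            char *value = pair;
            char *key = strsep(&value, "=");
            if (!key || !*key || !value || !*value) continue;
            printf("key='%s' value='%s'\n", key, value);
        }
    }
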
diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json
index cb2b4809c..16d8bfaf1 100644
--- a/web/api/netdata-swagger.json
+++ b/web/api/netdata-swagger.json
@@ -3,12 +3,210 @@
"info": {
"title": "Netdata API",
"description": "Real-time performance and health monitoring.",
- "version": "1.33.1"
+ "version": "1.38",
+ "contact": {
+ "name": "Netdata Agent API",
+ "email": "info@netdata.cloud",
+ "url": "https://netdata.cloud"
+ },
+ "license": {
+ "name": "GPL v3+",
+ "url": "https://github.com/netdata/netdata/blob/master/LICENSE"
+ }
},
+ "servers": [
+ {
+ "url": "https://registry.my-netdata.io"
+ },
+ {
+ "url": "http://registry.my-netdata.io"
+ },
+ {
+ "url": "http://localhost:19999"
+ }
+ ],
+ "tags": [
+ {
+ "name": "nodes",
+ "description": "Everything related to monitored nodes"
+ },
+ {
+ "name": "charts",
+ "description": "Everything related to chart instances - DO NOT USE IN NEW CODE - use contexts instead"
+ },
+ {
+ "name": "contexts",
+      "description": "Everything related to contexts - in new code, use this instead of charts"
+ },
+ {
+ "name": "data",
+ "description": "Everything related to data queries"
+ },
+ {
+ "name": "badges",
+ "description": "Everything related to dynamic badges based on metric data"
+ },
+ {
+ "name": "weights",
+ "description": "Everything related to scoring / weighting metrics"
+ },
+ {
+ "name": "functions",
+ "description": "Everything related to functions"
+ },
+ {
+ "name": "alerts",
+ "description": "Everything related to alerts"
+ },
+ {
+ "name": "management",
+ "description": "Everything related to managing netdata agents"
+ }
+ ],
"paths": {
- "/info": {
+ "/api/v2/nodes": {
+ "get": {
+ "operationId": "getNodes2",
+ "tags": [
+ "nodes"
+ ],
+ "summary": "Nodes Info v2",
+ "description": "Get a list of all nodes hosted by this Netdata agent.\n",
+ "parameters": [
+ {
+ "$ref": "#/components/parameters/scopeNodes"
+ },
+ {
+ "$ref": "#/components/parameters/scopeContexts"
+ },
+ {
+ "$ref": "#/components/parameters/filterNodes"
+ },
+ {
+ "$ref": "#/components/parameters/filterContexts"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "description": "`/api/v2/nodes` response for all nodes hosted by a Netdata agent.\n",
+ "type": "object",
+ "properties": {
+ "api": {
+ "$ref": "#/components/schemas/api"
+ },
+ "agents": {
+ "$ref": "#/components/schemas/agents"
+ },
+ "versions": {
+ "$ref": "#/components/schemas/versions"
+ },
+ "nodes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/nodeFull"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v2/contexts": {
+ "get": {
+ "operationId": "getContexts2",
+ "tags": [
+ "contexts"
+ ],
+ "summary": "Contexts Info v2",
+ "description": "Get a list of all contexts, across all nodes, hosted by this Netdata agent.\n",
+ "parameters": [
+ {
+ "$ref": "#/components/parameters/scopeNodes"
+ },
+ {
+ "$ref": "#/components/parameters/scopeContexts"
+ },
+ {
+ "$ref": "#/components/parameters/filterNodes"
+ },
+ {
+ "$ref": "#/components/parameters/filterContexts"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/contexts2"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v2/q": {
"get": {
- "summary": "Get netdata basic information",
+ "operationId": "q2",
+ "tags": [
+ "contexts"
+ ],
+ "summary": "Full Text Search v2",
+        "description": "Get a list of contexts, across all nodes, hosted by this Netdata agent, matching a string expression.\n",
+ "parameters": [
+ {
+ "name": "q",
+ "in": "query",
+ "description": "The strings to search for, formatted as a simple pattern",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern"
+ }
+ },
+ {
+ "$ref": "#/components/parameters/scopeNodes"
+ },
+ {
+ "$ref": "#/components/parameters/scopeContexts"
+ },
+ {
+ "$ref": "#/components/parameters/filterNodes"
+ },
+ {
+ "$ref": "#/components/parameters/filterContexts"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/contexts2"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/api/v1/info": {
+ "get": {
+ "operationId": "getNodeInfo1",
+ "tags": [
+ "nodes"
+ ],
+ "summary": "Node Info v1",
"description": "The info endpoint returns basic information about netdata. It provides:\n* netdata version\n* netdata unique id\n* list of hosts mirrored (includes itself)\n* Operating System, Virtualization, K8s nodes and Container technology information\n* List of active collector plugins and modules\n* Streaming information\n* number of alarms in the host\n * number of alarms in normal state\n * number of alarms in warning state\n * number of alarms in critical state\n",
"responses": {
"200": {
@@ -27,9 +225,13 @@
}
}
},
- "/charts": {
+ "/api/v1/charts": {
"get": {
- "summary": "Get a list of all charts available at the server",
+ "operationId": "getNodeCharts1",
+ "tags": [
+ "charts"
+ ],
+ "summary": "List all charts v1 - EOL",
"description": "The charts endpoint returns a summary about all charts stored in the netdata server.",
"responses": {
"200": {
@@ -45,21 +247,17 @@
}
}
},
- "/chart": {
+ "/api/v1/chart": {
"get": {
- "summary": "Get info about a specific chart",
+ "operationId": "getNodeChart1",
+ "tags": [
+ "charts"
+ ],
+ "summary": "Get one chart v1 - EOL",
"description": "The chart endpoint returns detailed information about a chart.",
"parameters": [
{
- "name": "chart",
- "in": "query",
- "description": "The id of the chart as returned by the /charts call.",
- "required": true,
- "schema": {
- "type": "string",
- "format": "as returned by /charts",
- "default": "system.cpu"
- }
+ "$ref": "#/components/parameters/chart"
}
],
"responses": {
@@ -82,88 +280,32 @@
}
}
},
- "/contexts": {
+ "/api/v1/contexts": {
"get": {
- "summary": "Get a list of all contexts available at the server",
+ "operationId": "getNodeContexts1",
+ "tags": [
+ "contexts"
+ ],
+ "summary": "Get a list of all node contexts available v1",
"description": "The contexts endpoint returns a summary about all contexts stored in the netdata server.",
"parameters": [
{
- "name": "options",
- "in": "query",
- "description": "Options that affect data generation.",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "array",
- "items": {
- "type": "string",
- "enum": [
- "full",
- "all",
- "charts",
- "dimensions",
- "labels",
- "uuids",
- "queue",
- "flags",
- "deleted",
- "deepscan"
- ]
- },
- "default": [
- "full"
- ]
- }
+ "$ref": "#/components/parameters/dimensions"
},
{
- "name": "after",
- "in": "query",
- "description": "limit the results on context having data after this timestamp.",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer"
- }
+ "$ref": "#/components/parameters/chart_label_key"
},
{
- "name": "before",
- "in": "query",
- "description": "limit the results on context having data before this timestamp.",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer"
- }
+ "$ref": "#/components/parameters/chart_labels_filter"
},
{
- "name": "chart_label_key",
- "in": "query",
- "description": "a simple pattern matching charts label keys (use comma or pipe as separator)",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "string"
- }
+ "$ref": "#/components/parameters/contextOptions1"
},
{
- "name": "chart_labels_filter",
- "in": "query",
- "description": "a simple pattern matching charts label key and values (use colon for equality, comma or pipe as separator)",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "string"
- }
+ "$ref": "#/components/parameters/after"
},
{
- "name": "dimensions",
- "in": "query",
- "description": "a simple pattern matching dimensions (use comma or pipe as separator)",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "string"
- }
+ "$ref": "#/components/parameters/before"
}
],
"responses": {
@@ -180,145 +322,324 @@
}
}
},
- "/context": {
+ "/api/v1/context": {
"get": {
+ "operationId": "getNodeContext1",
+ "tags": [
+ "contexts"
+ ],
"summary": "Get info about a specific context",
- "description": "The context endpoint returns detailed information about a given context.",
+ "description": "The context endpoint returns detailed information about a given context.\nThe `context` parameter is required for this call.\n",
"parameters": [
{
- "name": "context",
- "in": "query",
- "description": "The id of the context as returned by the /contexts call.",
- "required": true,
- "schema": {
- "type": "string",
- "format": "as returned by /contexts",
- "default": "system.cpu"
+ "$ref": "#/components/parameters/context"
+ },
+ {
+ "$ref": "#/components/parameters/dimensions"
+ },
+ {
+ "$ref": "#/components/parameters/chart_label_key"
+ },
+ {
+ "$ref": "#/components/parameters/chart_labels_filter"
+ },
+ {
+ "$ref": "#/components/parameters/contextOptions1"
+ },
+ {
+ "$ref": "#/components/parameters/after"
+ },
+ {
+ "$ref": "#/components/parameters/before"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with detailed information about the context.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/context"
+ }
+ }
}
},
+ "400": {
+ "description": "No context id was supplied in the request."
+ },
+ "404": {
+ "description": "No context with the given id is found."
+ }
+ }
+ }
+ },
+ "/api/v2/data": {
+ "get": {
+ "operationId": "dataQuery2",
+ "tags": [
+ "data"
+ ],
+ "summary": "Data Query v2",
+ "description": "Multi-node, multi-context, multi-instance, multi-dimension data queries, with time and metric aggregation.\n",
+ "parameters": [
{
- "name": "options",
+ "name": "group_by",
"in": "query",
- "description": "Options that affect data generation.",
+ "description": "A comma separated list of the groupings required.\nAll possible values can be combined together, except `selected`. If `selected` is given in the list, all others are ignored.\nThe order they are placed in the list is currently ignored.\n",
"required": false,
- "allowEmptyValue": true,
"schema": {
"type": "array",
"items": {
"type": "string",
"enum": [
- "full",
- "all",
- "charts",
- "dimensions",
- "labels",
- "uuids",
- "queue",
- "flags",
- "deleted",
- "deepscan"
+ "dimension",
+ "instance",
+ "percentage-of-instance",
+ "label",
+ "node",
+ "context",
+ "units",
+ "selected"
]
},
"default": [
- "full"
+ "dimension"
]
}
},
{
- "name": "after",
+ "name": "group_by_label",
"in": "query",
- "description": "limit the results on context having data after this timestamp.",
+ "description": "A comma separated list of the label keys to group by their values. The order of the labels in the list is respected.\n",
"required": false,
"schema": {
- "type": "number",
- "format": "integer"
+ "type": "string",
+ "format": "comma separated list of label keys to group by",
+ "default": ""
}
},
{
- "name": "before",
+ "name": "aggregation",
"in": "query",
- "description": "limit the results on context having data before this timestamp.",
+ "description": "The aggregation function to apply when grouping metrics together.\nWhen option `raw` is given, `average` and `avg` behave like `sum` and the caller is expected to calculate the average.\n",
"required": false,
"schema": {
- "type": "number",
- "format": "integer"
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "avg",
+ "average",
+ "sum"
+ ],
+ "default": "average"
}
},
{
- "name": "chart_label_key",
- "in": "query",
- "description": "a simple pattern matching charts label keys (use comma or pipe as separator)",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "string"
- }
+ "$ref": "#/components/parameters/scopeNodes"
},
{
- "name": "chart_labels_filter",
- "in": "query",
- "description": "a simple pattern matching charts label key and values (use colon for equality, comma or pipe as separator)",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "string"
- }
+ "$ref": "#/components/parameters/scopeContexts"
},
{
- "name": "dimensions",
- "in": "query",
- "description": "a simple pattern matching dimensions (use comma or pipe as separator)",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "string"
- }
+ "$ref": "#/components/parameters/filterNodes"
+ },
+ {
+ "$ref": "#/components/parameters/filterContexts"
+ },
+ {
+ "$ref": "#/components/parameters/filterInstances"
+ },
+ {
+ "$ref": "#/components/parameters/filterLabels"
+ },
+ {
+ "$ref": "#/components/parameters/filterAlerts"
+ },
+ {
+ "$ref": "#/components/parameters/filterDimensions"
+ },
+ {
+ "$ref": "#/components/parameters/after"
+ },
+ {
+ "$ref": "#/components/parameters/before"
+ },
+ {
+ "$ref": "#/components/parameters/points"
+ },
+ {
+ "$ref": "#/components/parameters/tier"
+ },
+ {
+ "$ref": "#/components/parameters/dataQueryOptions"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroup2"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroupOptions2"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeResampling2"
+ },
+ {
+ "$ref": "#/components/parameters/dataFormat2"
+ },
+ {
+ "$ref": "#/components/parameters/timeoutMS"
+ },
+ {
+ "$ref": "#/components/parameters/callback"
+ },
+ {
+ "$ref": "#/components/parameters/filename"
+ },
+ {
+ "$ref": "#/components/parameters/tqx"
}
],
"responses": {
"200": {
- "description": "A javascript object with detailed information about the context.",
+ "description": "The call was successful. The response includes the data in the format requested.\n",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/context"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2"
+ },
+ {
+ "$ref": "#/components/schemas/data_json_formats2"
+ }
+ ]
+ }
+ },
+ "text/plain": {
+ "schema": {
+ "type": "string",
+ "format": "according to the format requested."
+ }
+ },
+ "text/html": {
+ "schema": {
+ "type": "string",
+ "format": "html"
+ }
+ },
+ "application/x-javascript": {
+ "schema": {
+ "type": "string",
+ "format": "javascript"
}
}
}
},
"400": {
- "description": "No context id was supplied in the request."
+ "description": "Bad request - the body will include a message stating what is wrong.\n"
},
- "404": {
- "description": "No context with the given id is found."
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory.\n"
}
}
}
},
- "/alarm_variables": {
+ "/api/v1/data": {
"get": {
- "summary": "List variables available to configure alarms for a chart",
- "description": "Returns the basic information of a chart and all the variables that can be used in alarm and template health configurations for the particular chart or family.",
+ "operationId": "dataQuery1",
+ "tags": [
+ "data"
+ ],
+        "summary": "Data Query v1 - Single node, single chart or context queries, without group-by.",
+        "description": "Query metric data of a chart or context of a node and return a dataset having time-series data for all dimensions available.\nFor group-by functionality, use `/api/v2/data`.\nAt least a `chart` or a `context` has to be given for the data query to be executed.\n",
"parameters": [
{
- "name": "chart",
- "in": "query",
- "description": "The id of the chart as returned by the /charts call.",
- "required": true,
- "schema": {
- "type": "string",
- "format": "as returned by /charts",
- "default": "system.cpu"
- }
+ "$ref": "#/components/parameters/chart"
+ },
+ {
+ "$ref": "#/components/parameters/context"
+ },
+ {
+ "$ref": "#/components/parameters/dimension"
+ },
+ {
+ "$ref": "#/components/parameters/chart_label_key"
+ },
+ {
+ "$ref": "#/components/parameters/chart_labels_filter"
+ },
+ {
+ "$ref": "#/components/parameters/after"
+ },
+ {
+ "$ref": "#/components/parameters/before"
+ },
+ {
+ "$ref": "#/components/parameters/points"
+ },
+ {
+ "$ref": "#/components/parameters/tier"
+ },
+ {
+ "$ref": "#/components/parameters/dataQueryOptions"
+ },
+ {
+ "$ref": "#/components/parameters/dataFormat1"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroup1"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroupOptions1"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeResampling1"
+ },
+ {
+ "$ref": "#/components/parameters/timeoutMS"
+ },
+ {
+ "$ref": "#/components/parameters/callback"
+ },
+ {
+ "$ref": "#/components/parameters/filename"
+ },
+ {
+ "$ref": "#/components/parameters/tqx"
}
],
"responses": {
"200": {
- "description": "A javascript object with information about the chart and the available variables.",
+ "description": "The call was successful. The response includes the data in the format requested.\n",
"content": {
"application/json": {
"schema": {
- "$ref": "#/components/schemas/alarm_variables"
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap1"
+ },
+ {
+ "$ref": "#/components/schemas/data_json_formats1"
+ }
+ ]
+ }
+ },
+ "text/plain": {
+ "schema": {
+ "type": "string",
+ "format": "according to the format requested."
+ }
+ },
+ "text/html": {
+ "schema": {
+ "type": "string",
+ "format": "html"
+ }
+ },
+ "application/x-javascript": {
+ "schema": {
+ "type": "string",
+ "format": "javascript"
}
}
}
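
To make the v2 data query documented above concrete: a hedged example fetching one context, grouped by dimension and averaged down to 60 points, via libcurl. The literal parameter names (`scope_contexts`, `group_by`, `aggregation`, `points`) are inferred from the parameter definitions and references above and may differ from the actual spelling; the host and context are placeholders:

    #include <curl/curl.h>

    int main(void) {
        curl_global_init(CURL_GLOBAL_DEFAULT);
        CURL *curl = curl_easy_init();
        if (!curl) return 1;
        // one context, grouped by dimension, averaged down to 60 points
        curl_easy_setopt(curl, CURLOPT_URL,
                         "http://localhost:19999/api/v2/data"
                         "?scope_contexts=system.cpu&group_by=dimension"
                         "&aggregation=average&points=60");
        CURLcode rc = curl_easy_perform(curl);  // response body goes to stdout
        curl_easy_cleanup(curl);
        curl_global_cleanup();
        return rc == CURLE_OK ? 0 : 1;
    }
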
@@ -327,7 +648,7 @@
"description": "Bad request - the body will include a message stating what is wrong."
},
"404": {
- "description": "No chart with the given id is found."
+ "description": "Chart or context is not found. The supplied chart or context will be reported."
},
"500": {
"description": "Internal server error. This usually means the server is out of memory."
@@ -335,449 +656,221 @@
}
}
},
- "/data": {
+ "/api/v1/allmetrics": {
"get": {
- "summary": "Get collected data for a specific chart",
- "description": "The data endpoint returns data stored in the round robin database of a chart.",
+ "operationId": "allMetrics1",
+ "tags": [
+ "data"
+ ],
+ "summary": "All Metrics v1 - Fetch latest value for all metrics",
+ "description": "The `allmetrics` endpoint returns the latest value of all metrics maintained for a netdata node.\n",
"parameters": [
{
- "name": "chart",
+ "name": "format",
"in": "query",
- "description": "The id of the chart as returned by the /charts call. Note chart or context must be specified",
- "required": false,
- "allowEmptyValue": false,
+ "description": "The format of the response to be returned.",
+ "required": true,
"schema": {
"type": "string",
- "format": "as returned by /charts",
- "default": "system.cpu"
+ "enum": [
+ "shell",
+ "prometheus",
+ "prometheus_all_hosts",
+ "json"
+ ],
+ "default": "shell"
}
},
{
- "name": "context",
+ "name": "filter",
"in": "query",
- "description": "The context of the chart as returned by the /charts call. Note chart or context must be specified",
+            "description": "Allows filtering charts out using simple patterns.",
"required": false,
- "allowEmptyValue": false,
"schema": {
"type": "string",
- "format": "as returned by /charts"
- }
- },
- {
- "name": "dimension",
- "in": "query",
- "description": "Zero, one or more dimension ids or names, as returned by the /chart call, separated with comma or pipe. Netdata simple patterns are supported.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "array",
- "items": {
- "type": "string",
- "format": "as returned by /charts"
- }
- }
- },
- {
- "name": "after",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds (negative, relative to parameter: before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). If not specified the default is -600 seconds. Netdata will adapt this parameter to the boundaries of the round robin database unless the allow_past option is specified.",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -600
- }
- },
- {
- "name": "before",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). Netdata will adapt this parameter to the boundaries of the round robin database. The default is zero (i.e. the timestamp of the last value collected).",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 0
- }
- },
- {
- "name": "points",
- "in": "query",
- "description": "The number of points to be returned. If not given, or it is <= 0, or it is bigger than the points stored in the round robin database for this chart for the given duration, all the available collected values for the given duration will be returned.",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 20
+ "format": "any text"
}
},
{
- "name": "chart_label_key",
+ "name": "variables",
"in": "query",
- "description": "Specify the chart label keys that need to match for context queries as comma separated values. At least one matching key is needed to match the corresponding chart.",
+ "description": "When enabled, netdata will expose various system configuration variables.\n",
"required": false,
- "allowEmptyValue": false,
"schema": {
"type": "string",
- "format": "key1,key2,key3"
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
}
},
{
- "name": "chart_labels_filter",
+ "name": "help",
"in": "query",
- "description": "Specify the chart label keys and values to match for context queries. All keys/values need to match for the chart to be included in the query. The labels are specified as key1:value1,key2:value2",
+ "description": "Enable or disable HELP lines in prometheus output.\n",
"required": false,
- "allowEmptyValue": false,
"schema": {
"type": "string",
- "format": "key1:value1,key2:value2,key3:value3"
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "no"
}
},
{
- "name": "group",
+ "name": "types",
"in": "query",
- "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
- "required": true,
- "allowEmptyValue": false,
+ "description": "Enable or disable TYPE lines in prometheus output.\n",
+ "required": false,
"schema": {
"type": "string",
"enum": [
- "min",
- "max",
- "average",
- "median",
- "stddev",
- "sum",
- "incremental-sum",
- "ses",
- "des",
- "cv",
- "countif",
- "percentile",
- "percentile25",
- "percentile50",
- "percentile75",
- "percentile80",
- "percentile90",
- "percentile95",
- "percentile97",
- "percentile98",
- "percentile99",
- "trimmed-mean",
- "trimmed-mean1",
- "trimmed-mean2",
- "trimmed-mean3",
- "trimmed-mean5",
- "trimmed-mean10",
- "trimmed-mean15",
- "trimmed-mean20",
- "trimmed-mean25",
- "trimmed-median",
- "trimmed-median1",
- "trimmed-median2",
- "trimmed-median3",
- "trimmed-median5",
- "trimmed-median10",
- "trimmed-median15",
- "trimmed-median20",
- "trimmed-median25"
+ "yes",
+ "no"
],
- "default": "average"
+ "default": "no"
}
},
{
- "name": "group_options",
+ "name": "timestamps",
"in": "query",
- "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
+ "description": "Enable or disable timestamps in prometheus output.\n",
"required": false,
- "allowEmptyValue": false,
"schema": {
- "type": "string"
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
}
},
{
- "name": "gtime",
+ "name": "names",
"in": "query",
- "description": "The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (ie when the data is per-second, setting gtime=60 will turn them to per-minute).",
+            "description": "When enabled, netdata will report dimension names. When disabled, netdata will report dimension IDs. The default is controlled in netdata.conf.\n",
"required": false,
- "allowEmptyValue": false,
"schema": {
- "type": "number",
- "format": "integer",
- "default": 0
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
}
},
{
- "name": "timeout",
+ "name": "oldunits",
"in": "query",
- "description": "Specify a timeout value in milliseconds after which the agent will abort the query and return a 503 error. A value of 0 indicates no timeout.",
+ "description": "When enabled, netdata will show metric names for the default `source=average` as they appeared before 1.12, by using the legacy unit naming conventions.\n",
"required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 0
- }
- },
- {
- "name": "format",
- "in": "query",
- "description": "The format of the data to be returned.",
- "required": true,
- "allowEmptyValue": false,
"schema": {
"type": "string",
"enum": [
- "json",
- "jsonp",
- "csv",
- "tsv",
- "tsv-excel",
- "ssv",
- "ssvcomma",
- "datatable",
- "datasource",
- "html",
- "markdown",
- "array",
- "csvjsonarray"
+ "yes",
+ "no"
],
- "default": "json"
+ "default": "yes"
}
},
{
- "name": "options",
+ "name": "hideunits",
"in": "query",
- "description": "Options that affect data generation.",
+ "description": "When enabled, netdata will not include the units in the metric names, for the default `source=average`.\n",
"required": false,
- "allowEmptyValue": false,
"schema": {
- "type": "array",
- "items": {
- "type": "string",
- "enum": [
- "nonzero",
- "flip",
- "jsonwrap",
- "min2max",
- "seconds",
- "milliseconds",
- "abs",
- "absolute",
- "absolute-sum",
- "null2zero",
- "objectrows",
- "google_json",
- "percentage",
- "unaligned",
- "match-ids",
- "match-names",
- "allow_past",
- "anomaly-bit"
- ]
- },
- "default": [
- "seconds",
- "jsonwrap"
- ]
+ "type": "string",
+ "enum": [
+ "yes",
+ "no"
+ ],
+ "default": "yes"
}
},
{
- "name": "callback",
+ "name": "server",
"in": "query",
- "description": "For JSONP responses, the callback function name.",
+ "description": "Set a distinct name of the client querying prometheus metrics. Netdata will use the client IP if this is not set.\n",
"required": false,
- "allowEmptyValue": true,
"schema": {
- "type": "string"
+ "type": "string",
+ "format": "any text"
}
},
{
- "name": "filename",
+ "name": "prefix",
"in": "query",
- "description": "Add Content-Disposition: attachment; filename= header to the response, that will instruct the browser to save the response with the given filename.",
+ "description": "Prefix all prometheus metrics with this string.\n",
"required": false,
- "allowEmptyValue": true,
"schema": {
- "type": "string"
+ "type": "string",
+ "format": "any text"
}
},
{
- "name": "tqx",
+ "name": "data",
"in": "query",
- "description": "[Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.",
+ "description": "Select the prometheus response data source. There is a setting in netdata.conf for the default.\n",
"required": false,
- "allowEmptyValue": true,
"schema": {
- "type": "string"
+ "type": "string",
+ "enum": [
+ "as-collected",
+ "average",
+ "sum"
+ ],
+ "default": "average"
}
}
],
"responses": {
"200": {
- "description": "The call was successful. The response includes the data in the format requested. Swagger2.0 does not process the discriminator field to show polymorphism. The response will be one of the sub-types of the data-schema according to the chosen format, e.g. json -> data_json.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/data"
- }
- }
- }
+ "description": "All the metrics returned in the format requested."
},
"400": {
- "description": "Bad request - the body will include a message stating what is wrong."
- },
- "404": {
- "description": "Chart or context is not found. The supplied chart or context will be reported."
- },
- "500": {
- "description": "Internal server error. This usually means the server is out of memory."
+ "description": "The format requested is not supported."
}
}
}
},
- "/badge.svg": {
+ "/api/v1/badge.svg": {
"get": {
+ "operationId": "badge1",
+ "tags": [
+ "badges"
+ ],
"summary": "Generate a badge in form of SVG image for a chart (or dimension)",
"description": "Successful responses are SVG images.",
"parameters": [
{
- "name": "chart",
- "in": "query",
- "description": "The id of the chart as returned by the /charts call.",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "string",
- "format": "as returned by /charts",
- "default": "system.cpu"
- }
+ "$ref": "#/components/parameters/chart"
},
{
- "name": "alarm",
- "in": "query",
- "description": "The name of an alarm linked to the chart.",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "string",
- "format": "any text"
- }
+ "$ref": "#/components/parameters/dimension"
},
{
- "name": "dimension",
- "in": "query",
- "description": "Zero, one or more dimension ids, as returned by the /chart call.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "array",
- "items": {
- "type": "string",
- "format": "as returned by /charts"
- }
- }
+ "$ref": "#/components/parameters/after"
},
{
- "name": "after",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the starting point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -600
- }
+ "$ref": "#/components/parameters/before"
},
{
- "name": "before",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the ending point of the data to be returned, or a relative number of seconds, to the last collected timestamp. Netdata will assume it is a relative number if it is smaller than the duration of the round robin database for this chart. So, if the round robin database is 3600 seconds, any value from -3600 to 3600 will trigger relative arithmetics. Netdata will adapt this parameter to the boundaries of the round robin database.",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 0
- }
+ "$ref": "#/components/parameters/dataTimeGroup1"
},
{
- "name": "group",
- "in": "query",
- "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods are supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "string",
- "enum": [
- "min",
- "max",
- "average",
- "median",
- "stddev",
- "sum",
- "incremental-sum",
- "ses",
- "des",
- "cv",
- "countif",
- "percentile",
- "percentile25",
- "percentile50",
- "percentile75",
- "percentile80",
- "percentile90",
- "percentile95",
- "percentile97",
- "percentile98",
- "percentile99",
- "trimmed-mean",
- "trimmed-mean1",
- "trimmed-mean2",
- "trimmed-mean3",
- "trimmed-mean5",
- "trimmed-mean10",
- "trimmed-mean15",
- "trimmed-mean20",
- "trimmed-mean25",
- "trimmed-median",
- "trimmed-median1",
- "trimmed-median2",
- "trimmed-median3",
- "trimmed-median5",
- "trimmed-median10",
- "trimmed-median15",
- "trimmed-median20",
- "trimmed-median25"
- ],
- "default": "average"
- }
+ "$ref": "#/components/parameters/dataQueryOptions"
},
{
- "name": "options",
+ "name": "alarm",
"in": "query",
- "description": "Options that affect data generation.",
+ "description": "The name of an alarm linked to the chart.",
"required": false,
"allowEmptyValue": true,
"schema": {
- "type": "array",
- "items": {
- "type": "string",
- "enum": [
- "abs",
- "absolute",
- "display-absolute",
- "absolute-sum",
- "null2zero",
- "percentage",
- "unaligned",
- "anomaly-bit"
- ]
- },
- "default": [
- "absolute"
- ]
+ "type": "string",
+ "format": "any text"
}
},
{
@@ -805,7 +898,7 @@
{
"name": "label_color",
"in": "query",
- "description": "A color to be used for the background of the label side(left side) of the badge. One of predefined colors or specific color in hex `RGB` or `RRGGBB` format (without preceding `#` character). If value wrong or not given default color will be used.",
+            "description": "A color to be used for the background of the label side (left side) of the badge. One of the predefined colors, or a specific color in hex `RGB` or `RRGGBB` format (without the preceding `#` character). If the value is wrong or not given, the default color will be used.\n",
"required": false,
"allowEmptyValue": true,
"schema": {
@@ -836,7 +929,7 @@
{
"name": "value_color",
"in": "query",
- "description": "A color to be used for the background of the value *(right)* part of badge. You can set multiple using a pipe with a condition each, like this: `color<value|color:null` The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists). Each color can be specified in same manner as for `label_color` parameter. Currently only integers are supported as values.",
+            "description": "A color to be used for the background of the value *(right)* part of the badge. You can set multiple colors using a pipe, with a condition each, like this: `color<value|color:null`. The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists). Each color can be specified in the same manner as for the `label_color` parameter. Currently only integers are supported as values.\n",
"required": false,
"allowEmptyValue": true,
"schema": {
@@ -847,7 +940,7 @@
{
"name": "text_color_lbl",
"in": "query",
- "description": "Font color for label *(left)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.",
+            "description": "Font color for the label *(left)* part of the badge. One of the predefined colors, or an HTML hexadecimal color without the preceding `#` character. Allowed formats are `RGB` and `RRGGBB`. If no value or a wrong value is given, the default color will be used.\n",
"required": false,
"allowEmptyValue": true,
"schema": {
@@ -878,7 +971,7 @@
{
"name": "text_color_val",
"in": "query",
- "description": "Font color for value *(right)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.",
+            "description": "Font color for the value *(right)* part of the badge. One of the predefined colors, or an HTML hexadecimal color without the preceding `#` character. Allowed formats are `RGB` and `RRGGBB`. If no value or a wrong value is given, the default color will be used.\n",
"required": false,
"allowEmptyValue": true,
"schema": {
@@ -942,7 +1035,7 @@
{
"name": "fixed_width_lbl",
"in": "query",
- "description": "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's left side *(label/name)*. You must set this parameter together with `fixed_width_val` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.",
+            "description": "This parameter overrides auto-sizing of the badge and creates it with a fixed width. It determines the size of the badge's left side *(label/name)*. You must set it together with `fixed_width_val`, otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting; if the text cannot fit the given space, it will be clipped. The `scale` parameter still applies to the values you give to `fixed_width_lbl` and `fixed_width_val`.\n",
"required": false,
"allowEmptyValue": false,
"schema": {
@@ -953,7 +1046,7 @@
{
"name": "fixed_width_val",
"in": "query",
- "description": "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's right side *(value)*. You must set this parameter together with `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.",
+            "description": "This parameter overrides auto-sizing of the badge and creates it with a fixed width. It determines the size of the badge's right side *(value)*. You must set it together with `fixed_width_lbl`, otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting; if the text cannot fit the given space, it will be clipped. The `scale` parameter still applies to the values you give to `fixed_width_lbl` and `fixed_width_val`.\n",
"required": false,
"allowEmptyValue": false,
"schema": {
@@ -978,185 +1071,310 @@
}
}
},
- "/allmetrics": {
+ "/api/v2/weights": {
"get": {
- "summary": "Get a value of all the metrics maintained by netdata",
- "description": "The allmetrics endpoint returns the latest value of all charts and dimensions stored in the netdata server.",
+ "operationId": "weights2",
+ "tags": [
+ "weights"
+ ],
+ "summary": "Score or weight all or some of the metrics, across all nodes, according to various algorithms.",
+ "description": "This endpoint goes through all metrics and scores them according to an algorithm.\n",
"parameters": [
{
- "name": "format",
- "in": "query",
- "description": "The format of the response to be returned.",
- "required": true,
- "schema": {
- "type": "string",
- "enum": [
- "shell",
- "prometheus",
- "prometheus_all_hosts",
- "json"
- ],
- "default": "shell"
- }
+ "$ref": "#/components/parameters/weightMethods"
},
{
- "name": "filter",
- "in": "query",
- "description": "Allows to filter charts out using simple patterns.",
- "required": false,
- "schema": {
- "type": "string",
- "format": "any text"
- }
+ "$ref": "#/components/parameters/scopeNodes"
},
{
- "name": "variables",
- "in": "query",
- "description": "When enabled, netdata will expose various system configuration metrics.",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "yes",
- "no"
- ],
- "default": "no"
- }
+ "$ref": "#/components/parameters/scopeContexts"
},
{
- "name": "help",
- "in": "query",
- "description": "Enable or disable HELP lines in prometheus output.",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "yes",
- "no"
- ],
- "default": "no"
- }
+ "$ref": "#/components/parameters/filterNodes"
},
{
- "name": "types",
- "in": "query",
- "description": "Enable or disable TYPE lines in prometheus output.",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "yes",
- "no"
- ],
- "default": "no"
- }
+ "$ref": "#/components/parameters/filterContexts"
},
{
- "name": "timestamps",
- "in": "query",
- "description": "Enable or disable timestamps in prometheus output.",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "yes",
- "no"
- ],
- "default": "yes"
- }
+ "$ref": "#/components/parameters/filterInstances"
},
{
- "name": "names",
- "in": "query",
- "description": "When enabled netdata will report dimension names. When disabled netdata will report dimension IDs. The default is controlled in netdata.conf.",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "yes",
- "no"
- ],
- "default": "yes"
- }
+ "$ref": "#/components/parameters/filterLabels"
},
{
- "name": "oldunits",
- "in": "query",
- "description": "When enabled, netdata will show metric names for the default source=average as they appeared before 1.12, by using the legacy unit naming conventions.",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "yes",
- "no"
- ],
- "default": "yes"
- }
+ "$ref": "#/components/parameters/filterAlerts"
},
{
- "name": "hideunits",
- "in": "query",
- "description": "When enabled, netdata will not include the units in the metric names, for the default source=average.",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "yes",
- "no"
- ],
- "default": "yes"
+ "$ref": "#/components/parameters/filterDimensions"
+ },
+ {
+ "$ref": "#/components/parameters/baselineAfter"
+ },
+ {
+ "$ref": "#/components/parameters/baselineBefore"
+ },
+ {
+ "$ref": "#/components/parameters/after"
+ },
+ {
+ "$ref": "#/components/parameters/before"
+ },
+ {
+ "$ref": "#/components/parameters/tier"
+ },
+ {
+ "$ref": "#/components/parameters/points"
+ },
+ {
+ "$ref": "#/components/parameters/timeoutMS"
+ },
+ {
+ "$ref": "#/components/parameters/dataQueryOptions"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroup2"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroupOptions2"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "JSON object with weights for each context, chart and dimension.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/weights2"
+ }
+ }
}
},
+ "400": {
+ "description": "The given parameters are invalid."
+ },
+ "403": {
+            "description": "Metrics correlations are not enabled on this Netdata Agent."
+ },
+ "404": {
+ "description": "No charts could be found, or the method that correlated the metrics did not produce any result.\n"
+ },
+ "504": {
+ "description": "Timeout - the query took too long and has been cancelled."
+ }
+ }
+ }
+ },
+ "/api/v1/weights": {
+ "get": {
+ "operationId": "weights1",
+ "tags": [
+ "weights"
+ ],
+ "summary": "Score or weight all or some of the metrics of a single node, according to various algorithms.",
+ "description": "This endpoint goes through all metrics and scores them according to an algorithm.\n",
+ "parameters": [
{
- "name": "server",
- "in": "query",
- "description": "Set a distinct name of the client querying prometheus metrics. Netdata will use the client IP if this is not set.",
- "required": false,
- "schema": {
- "type": "string",
- "format": "any text"
+ "$ref": "#/components/parameters/weightMethods"
+ },
+ {
+ "$ref": "#/components/parameters/context"
+ },
+ {
+ "$ref": "#/components/parameters/baselineAfter"
+ },
+ {
+ "$ref": "#/components/parameters/baselineBefore"
+ },
+ {
+ "$ref": "#/components/parameters/after"
+ },
+ {
+ "$ref": "#/components/parameters/before"
+ },
+ {
+ "$ref": "#/components/parameters/tier"
+ },
+ {
+ "$ref": "#/components/parameters/points"
+ },
+ {
+ "$ref": "#/components/parameters/timeoutMS"
+ },
+ {
+ "$ref": "#/components/parameters/dataQueryOptions"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroup1"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroupOptions1"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "JSON object with weights for each context, chart and dimension.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/weights"
+ }
+ }
}
},
+ "400": {
+ "description": "The given parameters are invalid."
+ },
+ "403": {
+            "description": "Metrics correlations are not enabled on this Netdata Agent."
+ },
+ "404": {
+ "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
+ },
+ "504": {
+ "description": "Timeout - the query took too long and has been cancelled."
+ }
+ }
+ }
+ },
+ "/api/v1/metric_correlations": {
+ "get": {
+ "operationId": "metricCorrelations1",
+ "tags": [
+ "weights"
+ ],
+ "summary": "Analyze all the metrics to find their correlations - EOL",
+        "description": "THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint. Given two time-windows (baseline, highlight), it goes through all the available metrics, queries both windows and tries to find how these two windows relate to each other. It supports multiple algorithms to do so. The result is a list of all metrics evaluated, weighted from 0.0 (the two windows are very different) to 1.0 (the two windows are similar). The algorithm automatically adjusts the baseline window to be a power-of-two multiple of the highlighted one (1, 2, 4, 8, etc).\n",
+ "parameters": [
{
- "name": "prefix",
- "in": "query",
- "description": "Prefix all prometheus metrics with this string.",
- "required": false,
- "schema": {
- "type": "string",
- "format": "any text"
+ "$ref": "#/components/parameters/weightMethods"
+ },
+ {
+ "$ref": "#/components/parameters/baselineAfter"
+ },
+ {
+ "$ref": "#/components/parameters/baselineBefore"
+ },
+ {
+ "$ref": "#/components/parameters/after"
+ },
+ {
+ "$ref": "#/components/parameters/before"
+ },
+ {
+ "$ref": "#/components/parameters/points"
+ },
+ {
+ "$ref": "#/components/parameters/tier"
+ },
+ {
+ "$ref": "#/components/parameters/timeoutMS"
+ },
+ {
+ "$ref": "#/components/parameters/dataQueryOptions"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroup1"
+ },
+ {
+ "$ref": "#/components/parameters/dataTimeGroupOptions1"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "JSON object with weights for each chart and dimension.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/metric_correlations"
+ }
+ }
}
},
+ "400": {
+ "description": "The given parameters are invalid."
+ },
+ "403": {
+            "description": "Metrics correlations are not enabled on this Netdata Agent."
+ },
+ "404": {
+ "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
+ },
+ "504": {
+ "description": "Timeout - the query took too long and has been cancelled."
+ }
+ }
+ }
+ },
+ "/api/v1/function": {
+ "get": {
+ "operationId": "function1",
+ "tags": [
+ "functions"
+ ],
+ "description": "Execute a collector function.",
+ "parameters": [
{
- "name": "data",
+ "name": "function",
"in": "query",
- "description": "Select the prometheus response data source. There is a setting in netdata.conf for the default.",
- "required": false,
+ "description": "The name of the function, as returned by the collector.",
+ "required": true,
+ "allowEmptyValue": false,
"schema": {
- "type": "string",
- "enum": [
- "as-collected",
- "average",
- "sum"
- ],
- "default": "average"
+ "type": "string"
}
+ },
+ {
+ "$ref": "#/components/parameters/timeoutSecs"
}
],
"responses": {
"200": {
- "description": "All the metrics returned in the format requested."
+ "description": "The collector function has been executed successfully. Each collector may return a different type of content."
},
"400": {
- "description": "The format requested is not supported."
+ "description": "The request was rejected by the collector."
+ },
+ "404": {
+ "description": "The requested function is not found."
+ },
+ "500": {
+            "description": "Other internal error; getting this error means there is a bug in Netdata."
+ },
+ "503": {
+ "description": "The collector to execute the function is not currently available."
+ },
+ "504": {
+ "description": "Timeout while waiting for the collector to execute the function."
+ },
+ "591": {
+ "description": "The collector sent a response, but it was invalid or corrupted."
+ }
+ }
+ }
+ },
+ "/api/v1/functions": {
+ "get": {
+ "operationId": "functions1",
+ "tags": [
+ "functions"
+ ],
+ "summary": "Get a list of all registered collector functions.",
+ "description": "Collector functions are programs that can be executed on demand.",
+ "responses": {
+ "200": {
+ "description": "A JSON object containing one object per supported function."
}
}
}
},
- "/alarms": {
+ "/api/v1/alarms": {
"get": {
+ "operationId": "alerts1",
+ "tags": [
+ "alerts"
+ ],
"summary": "Get a list of active or raised alarms on the server",
- "description": "The alarms endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing \"?all\", all the enabled alarms are returned.",
+ "description": "The alarms endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing \"?all\", all the enabled alarms are returned.\n",
"parameters": [
{
"name": "all",
@@ -1193,10 +1411,14 @@
}
}
},
- "/alarms_values": {
+ "/api/v1/alarms_values": {
"get": {
+ "operationId": "alertValues1",
+ "tags": [
+ "alerts"
+ ],
"summary": "Get a list of active or raised alarms on the server",
- "description": "The alarms_values endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing '?all', all the enabled alarms are returned. This option output differs from `/alarms` in the number of variables delivered. This endpoint gives to user `id`, `value`, `last_updated` time, and alarm `status`.",
+        "description": "The alarms_values endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing '?all', all the enabled alarms are returned. The output of this endpoint differs from `/alarms` in the number of variables delivered. It gives the user `id`, `value`, `last_updated` time, and alarm `status`.\n",
"parameters": [
{
"name": "all",
@@ -1233,15 +1455,19 @@
}
}
},
- "/alarm_log": {
+ "/api/v1/alarm_log": {
"get": {
+ "operationId": "alertsLog1",
+ "tags": [
+ "alerts"
+ ],
"summary": "Retrieves the entries of the alarm log",
- "description": "Returns an array of alarm_log entries, with historical information on raised and cleared alarms.",
+ "description": "Returns an array of alarm_log entries, with historical information on raised and cleared alarms.\n",
"parameters": [
{
"name": "after",
"in": "query",
- "description": "Passing the parameter after=UNIQUEID returns all the events in the alarm log that occurred after UNIQUEID. An automated series of calls would call the interface once without after=, store the last UNIQUEID of the returned set, and give it back to get incrementally the next events.",
+ "description": "Passing the parameter after=UNIQUEID returns all the events in the alarm log that occurred after UNIQUEID. An automated series of calls would call the interface once without after=, store the last UNIQUEID of the returned set, and give it back to get incrementally the next events.\n",
"required": false,
"schema": {
"type": "integer"
@@ -1265,30 +1491,21 @@
}
}
},
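The `after=UNIQUEID` pagination of /api/v1/alarm_log lends itself to an incremental polling loop. A sketch, assuming a local agent and that each log entry carries a `unique_id` field, as the description above implies:

    import json, time, urllib.request

    BASE = "http://localhost:19999/api/v1"  # assumed local agent
    last_id = 0  # 0 / absent means "from the beginning of the log"

    while True:
        url = f"{BASE}/alarm_log" + (f"?after={last_id}" if last_id else "")
        with urllib.request.urlopen(url) as resp:
            entries = json.load(resp)
        for e in entries:
            # remember the largest unique id seen, to resume from it next time
            last_id = max(last_id, e["unique_id"])
            print(e["unique_id"], e.get("status"))
        time.sleep(10)  # the poll interval is up to the caller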
- "/alarm_count": {
+ "/api/v1/alarm_count": {
"get": {
+ "operationId": "alertsCount1",
+ "tags": [
+ "alerts"
+ ],
"summary": "Get an overall status of the chart",
- "description": "Checks multiple charts with the same context and counts number of alarms with given status.",
+ "description": "Checks multiple charts with the same context and counts number of alarms with given status.\n",
"parameters": [
{
- "in": "query",
- "name": "context",
- "description": "Specify context which should be checked.",
- "required": false,
- "allowEmptyValue": true,
- "schema": {
- "type": "array",
- "items": {
- "type": "string"
- },
- "default": [
- "system.cpu"
- ]
- }
+ "$ref": "#/components/parameters/context"
},
{
- "in": "query",
"name": "status",
+ "in": "query",
"description": "Specify alarm status to count.",
"required": false,
"allowEmptyValue": true,
@@ -1327,15 +1544,63 @@
}
}
},
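As a sketch, counting the CRITICAL alarms of a context with /api/v1/alarm_count looks like this (assuming a local agent; the exact shape of the small JSON response depends on the agent):

    import urllib.parse, urllib.request

    qs = urllib.parse.urlencode({"context": "system.cpu", "status": "CRITICAL"})
    url = f"http://localhost:19999/api/v1/alarm_count?{qs}"
    with urllib.request.urlopen(url) as resp:
        print(resp.read().decode())  # e.g. a count per matched context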
- "/manage/health": {
+ "/api/v1/alarm_variables": {
+ "get": {
+ "operationId": "getNodeAlertVariables1",
+ "tags": [
+ "alerts"
+ ],
+ "summary": "List variables available to configure alarms for a chart",
+ "description": "Returns the basic information of a chart and all the variables that can be used in alarm and template health configurations for the particular chart or family.\n",
+ "parameters": [
+ {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the /charts call.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts",
+ "default": "system.cpu"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with information about the chart and the available variables.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/alarm_variables"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "Bad request - the body will include a message stating what is wrong."
+ },
+ "404": {
+ "description": "No chart with the given id is found."
+ },
+ "500": {
+ "description": "Internal server error. This usually means the server is out of memory."
+ }
+ }
+ }
+ },
+ "/api/v1/manage/health": {
"get": {
- "summary": "Accesses the health management API to control health checks and notifications at runtime.",
- "description": "Available from Netdata v1.12 and above, protected via bearer authorization. Especially useful for maintenance periods, the API allows you to disable health checks completely, silence alarm notifications, or Disable/Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. For the simple disable/silence all scenarios, only the cmd parameter is required. The other parameters are used to define alarm selectors. For more information and examples, refer to the netdata documentation.",
+ "operationId": "health1",
+ "tags": [
+ "management"
+ ],
+ "summary": "Accesses the health management API to control health checks and notifications at runtime.\n",
+ "description": "Available from Netdata v1.12 and above, protected via bearer authorization. Especially useful for maintenance periods, the API allows you to disable health checks completely, silence alarm notifications, or Disable/Silence specific alarms that match selectors on alarm/template name, chart, context, host and family. For the simple disable/silence all scenarios, only the cmd parameter is required. The other parameters are used to define alarm selectors. For more information and examples, refer to the netdata documentation.\n",
"parameters": [
{
"name": "cmd",
"in": "query",
- "description": "DISABLE ALL: No alarm criteria are evaluated, nothing is written in the alarm log. SILENCE ALL: No notifications are sent. RESET: Return to the default state. DISABLE/SILENCE: Set the mode to be used for the alarms matching the criteria of the alarm selectors. LIST: Show active configuration.",
+ "description": "DISABLE ALL: No alarm criteria are evaluated, nothing is written in the alarm log. SILENCE ALL: No notifications are sent. RESET: Return to the default state. DISABLE/SILENCE: Set the mode to be used for the alarms matching the criteria of the alarm selectors. LIST: Show active configuration.\n",
"required": false,
"schema": {
"type": "string",
@@ -1400,10 +1665,14 @@
}
}
},
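Because /api/v1/manage/health is protected via bearer authorization, every request must carry the management token. A sketch, where the token value is a placeholder for whatever the agent has been configured with:

    import urllib.parse, urllib.request

    TOKEN = "XXXX"  # placeholder: the agent's management API token
    qs = urllib.parse.urlencode({"cmd": "SILENCE ALL"})
    req = urllib.request.Request(
        f"http://localhost:19999/api/v1/manage/health?{qs}",
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    with urllib.request.urlopen(req) as resp:
        print(resp.read().decode())
    # cmd=RESET returns to the default state; cmd=LIST shows the active configuration.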
- "/aclk": {
+ "/api/v1/aclk": {
"get": {
+ "operationId": "aclk1",
+ "tags": [
+ "management"
+ ],
"summary": "Get information about current ACLK state",
- "description": "ACLK endpoint returns detailed information about current state of ACLK (Agent to Cloud communication).",
+ "description": "ACLK endpoint returns detailed information about current state of ACLK (Agent to Cloud communication).\n",
"responses": {
"200": {
"description": "JSON object with ACLK information.",
@@ -1417,519 +1686,561 @@
}
}
}
- },
- "/metric_correlations": {
- "get": {
- "summary": "Analyze all the metrics to find their correlations",
- "description": "THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint. Given two time-windows (baseline, highlight), it goes through all the available metrics, querying both windows and tries to find how these two windows relate to each other. It supports multiple algorithms to do so. The result is a list of all metrics evaluated, weighted for 0.0 (the two windows are more different) to 1.0 (the two windows are similar). The algorithm adjusts automatically the baseline window to be a power of two multiple of the highlighted (1, 2, 4, 8, etc).",
- "parameters": [
- {
- "name": "baseline_after",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the starting point of baseline window, or a relative number of seconds (negative, relative to parameter baseline_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds).",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -300
- }
- },
- {
- "name": "baseline_before",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the ending point of the baseline window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -60
- }
- },
- {
- "name": "after",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the starting point of highlighted window, or a relative number of seconds (negative, relative to parameter highlight_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds).",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -60
- }
- },
- {
- "name": "before",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the ending point of the highlighted window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 0
- }
- },
- {
- "name": "points",
- "in": "query",
- "description": "The number of points to be evaluated for the highlighted window. The baseline window will be adjusted automatically to receive a proportional amount of points.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 500
- }
- },
- {
- "name": "method",
- "in": "query",
- "description": "the algorithm to run",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "ks2",
- "volume"
- ],
- "default": "ks2"
- }
- },
- {
- "name": "timeout",
- "in": "query",
- "description": "Cancel the query if to takes more that this amount of milliseconds.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 60000
- }
- },
- {
- "name": "options",
- "in": "query",
- "description": "Options that affect data generation.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "array",
- "items": {
- "type": "string",
- "enum": [
- "min2max",
- "abs",
- "absolute",
- "absolute-sum",
- "null2zero",
- "percentage",
- "unaligned",
- "allow_past",
- "nonzero",
- "anomaly-bit",
- "raw"
- ]
- },
- "default": [
- "null2zero",
- "allow_past",
- "nonzero",
- "unaligned"
- ]
- }
- },
- {
- "name": "group",
- "in": "query",
- "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "string",
- "enum": [
- "min",
- "max",
- "average",
- "median",
- "stddev",
- "sum",
- "incremental-sum",
- "ses",
- "des",
- "cv",
- "countif",
- "percentile",
- "percentile25",
- "percentile50",
- "percentile75",
- "percentile80",
- "percentile90",
- "percentile95",
- "percentile97",
- "percentile98",
- "percentile99",
- "trimmed-mean",
- "trimmed-mean1",
- "trimmed-mean2",
- "trimmed-mean3",
- "trimmed-mean5",
- "trimmed-mean10",
- "trimmed-mean15",
- "trimmed-mean20",
- "trimmed-mean25",
- "trimmed-median",
- "trimmed-median1",
- "trimmed-median2",
- "trimmed-median3",
- "trimmed-median5",
- "trimmed-median10",
- "trimmed-median15",
- "trimmed-median20",
- "trimmed-median25"
- ],
- "default": "average"
- }
- },
- {
- "name": "group_options",
- "in": "query",
- "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "string"
- }
- }
- ],
- "responses": {
- "200": {
- "description": "JSON object with weights for each chart and dimension.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/metric_correlations"
- }
- }
- }
- },
- "400": {
- "description": "The given parameters are invalid."
- },
- "403": {
- "description": "metrics correlations are not enabled on this Netdata Agent."
- },
- "404": {
- "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
- },
- "504": {
- "description": "Timeout - the query took too long and has been cancelled."
- }
+ }
+ },
+ "components": {
+ "parameters": {
+ "scopeNodes": {
+ "name": "scope_nodes",
+ "in": "query",
+ "description": "A simple pattern limiting the nodes scope of the query. The scope controls both data and metadata response. The simple pattern is checked against the nodes' machine guid, node id and hostname. The default nodes scope is all nodes for which this agent has data for. Usually the nodes scope is used to slice the entire dashboard (e.g. the Global Nodes Selector at the Netdata Cloud overview dashboard). Both positive and negative simple pattern expressions are supported.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
}
- }
- },
- "/function": {
- "get": {
- "summary": "Execute a collector function.",
- "parameters": [
- {
- "name": "function",
- "in": "query",
- "description": "The name of the function, as returned by the collector.",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "timeout",
- "in": "query",
- "description": "The timeout in seconds to wait for the function to complete.",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 10
- }
- }
- ],
- "responses": {
- "200": {
- "description": "The collector function has been executed successfully. Each collector may return a different type of content."
- },
- "400": {
- "description": "The request was rejected by the collector."
- },
- "404": {
- "description": "The requested function is not found."
- },
- "500": {
- "description": "Other internal error, getting this error means there is a bug in Netdata."
- },
- "503": {
- "description": "The collector to execute the function is not currently available."
- },
- "504": {
- "description": "Timeout while waiting for the collector to execute the function."
- },
- "591": {
- "description": "The collector sent a response, but it was invalid or corrupted."
- }
+ },
+ "scopeContexts": {
+ "name": "scope_contexts",
+ "in": "query",
+ "description": "A simple pattern limiting the contexts scope of the query. The scope controls both data and metadata response. The default contexts scope is all contexts for which this agent has data for. Usually the contexts scope is used to slice data on the dashboard (e.g. each context based chart has its own contexts scope, limiting the chart to all the instances of the selected context). Both positive and negative simple pattern expressions are supported.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
}
- }
- },
- "/functions": {
- "get": {
- "summary": "Get a list of all registered collector functions.",
- "description": "Collector functions are programs that can be executed on demand.",
- "responses": {
- "200": {
- "description": "A JSON object containing one object per supported function."
- }
+ },
+ "filterNodes": {
+ "name": "nodes",
+ "in": "query",
+ "description": "A simple pattern matching the nodes to be queried. This only controls the data response, not the metadata. The simple pattern is checked against the nodes' machine guid, node id, hostname. The default nodes selector is all the nodes matched by the nodes scope. Both positive and negative simple pattern expressions are supported.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
}
- }
- },
- "/weights": {
- "get": {
- "summary": "Analyze all the metrics using an algorithm and score them accordingly",
- "description": "This endpoint goes through all metrics and scores them according to an algorithm.",
- "parameters": [
- {
- "name": "baseline_after",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the starting point of baseline window, or a relative number of seconds (negative, relative to parameter baseline_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). This parameter is used in KS2 and VOLUME algorithms.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -300
- }
- },
- {
- "name": "baseline_before",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the ending point of the baseline window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). This parameter is used in KS2 and VOLUME algorithms.",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -60
- }
- },
- {
- "name": "after",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the starting point of highlighted window, or a relative number of seconds (negative, relative to parameter highlight_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds).",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": -60
- }
- },
- {
- "name": "before",
- "in": "query",
- "description": "This parameter can either be an absolute timestamp specifying the ending point of the highlighted window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
- "required": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 0
- }
- },
- {
- "name": "context",
- "in": "query",
- "description": "A simple pattern matching the contexts to evaluate.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "string"
- }
- },
- {
- "name": "points",
- "in": "query",
- "description": "The number of points to be evaluated for the highlighted window. The baseline window will be adjusted automatically to receive a proportional amount of points. This parameter is only used by the KS2 algorithm.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 500
- }
- },
- {
- "name": "method",
- "in": "query",
- "description": "the algorithm to run",
- "required": false,
- "schema": {
- "type": "string",
- "enum": [
- "ks2",
- "volume",
- "anomaly-rate"
- ],
- "default": "anomaly-rate"
- }
- },
- {
- "name": "tier",
- "in": "query",
- "description": "Use the specified database tier",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer"
- }
- },
- {
- "name": "timeout",
- "in": "query",
- "description": "Cancel the query if to takes more that this amount of milliseconds.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "number",
- "format": "integer",
- "default": 60000
- }
- },
- {
- "name": "options",
- "in": "query",
- "description": "Options that affect data generation.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "array",
- "items": {
- "type": "string",
- "enum": [
- "min2max",
- "abs",
- "absolute",
- "absolute-sum",
- "null2zero",
- "percentage",
- "unaligned",
- "nonzero",
- "anomaly-bit",
- "raw"
- ]
- },
- "default": [
- "null2zero",
- "nonzero",
- "unaligned"
- ]
- }
- },
- {
- "name": "group",
- "in": "query",
- "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
- "required": true,
- "allowEmptyValue": false,
- "schema": {
- "type": "string",
- "enum": [
- "min",
- "max",
- "average",
- "median",
- "stddev",
- "sum",
- "incremental-sum",
- "ses",
- "des",
- "cv",
- "countif",
- "percentile",
- "percentile25",
- "percentile50",
- "percentile75",
- "percentile80",
- "percentile90",
- "percentile95",
- "percentile97",
- "percentile98",
- "percentile99",
- "trimmed-mean",
- "trimmed-mean1",
- "trimmed-mean2",
- "trimmed-mean3",
- "trimmed-mean5",
- "trimmed-mean10",
- "trimmed-mean15",
- "trimmed-mean20",
- "trimmed-mean25",
- "trimmed-median",
- "trimmed-median1",
- "trimmed-median2",
- "trimmed-median3",
- "trimmed-median5",
- "trimmed-median10",
- "trimmed-median15",
- "trimmed-median20",
- "trimmed-median25"
- ],
- "default": "average"
- }
+ },
+ "filterContexts": {
+ "name": "contexts",
+ "in": "query",
+ "description": "A simple pattern matching the contexts to be queried. This only controls the data response, not the metadata. Both positive and negative simple pattern expressions are supported.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
+ }
+ },
+ "filterInstances": {
+ "name": "instances",
+ "in": "query",
+ "description": "A simple pattern matching the instances to be queried. The simple pattern is checked against the instance `id`, the instance `name`, the fully qualified name of the instance `id` and `name`, like `instance@machine_guid`, where `instance` is either its `id` or `name`. Both positive and negative simple pattern expressions are supported.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
+ }
+ },
+ "filterLabels": {
+ "name": "labels",
+ "in": "query",
+ "description": "A simple pattern matching the labels to be queried. The simple pattern is checked against `name:value` of all the labels of all the eligible instances (as filtered by all the above: scope nodes, scope contexts, nodes, contexts and instances). Negative simple patterns should not be used in this filter.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
+ }
+ },
+ "filterAlerts": {
+ "name": "alerts",
+ "in": "query",
+ "description": "A simple pattern matching the alerts to be queried. The simple pattern is checked against the `name` of alerts and the combination of `name:status`, when status is one of `CLEAR`, `WARNING`, `CRITICAL`, `REMOVED`, `UNDEFINED`, `UNINITIALIZED`, of all the alerts of all the eligible instances (as filtered by all the above). A negative simple pattern will exclude the instances having the labels matched.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
+ }
+ },
+ "filterDimensions": {
+ "name": "dimensions",
+ "in": "query",
+ "description": "A simple patterns matching the dimensions to be queried. The simple pattern is checked against and `id` and the `name` of the dimensions of the eligible instances (as filtered by all the above). Both positive and negative simple pattern expressions are supported.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "format": "simple pattern",
+ "default": "*"
+ }
+ },
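Scopes and filters slice a query in two stages: the scope decides which objects appear in the response at all (data and metadata), while the filters only restrict which of them contribute data. A sketch of a /api/v2/data query combining them; the patterns use the simple-pattern syntax referenced above (`*` wildcards, `!` for negative expressions, `|` to separate expressions):

    import urllib.parse, urllib.request

    params = {
        "scope_nodes": "web*|!web03",   # all web nodes except web03 (negative pattern)
        "scope_contexts": "system.cpu",
        "instances": "*",               # keep every instance inside the scope
        "format": "json2",
    }
    url = "http://localhost:19999/api/v2/data?" + urllib.parse.urlencode(params)
    with urllib.request.urlopen(url) as resp:
        print(resp.read(200))  # a jsonwrap2 object, see the schemas below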
+ "dataFormat1": {
+ "name": "format",
+ "in": "query",
+ "description": "The format of the data to be returned.",
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "json",
+ "jsonp",
+ "csv",
+ "tsv",
+ "tsv-excel",
+ "ssv",
+ "ssvcomma",
+ "datatable",
+ "datasource",
+ "html",
+ "markdown",
+ "array",
+ "csvjsonarray"
+ ],
+ "default": "json"
+ }
+ },
+ "dataFormat2": {
+ "name": "format",
+ "in": "query",
+ "description": "The format of the data to be returned.",
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "json",
+ "json2",
+ "jsonp",
+ "csv",
+ "tsv",
+ "tsv-excel",
+ "ssv",
+ "ssvcomma",
+ "datatable",
+ "datasource",
+ "html",
+ "markdown",
+ "array",
+ "csvjsonarray"
+ ],
+ "default": "json2"
+ }
+ },
+ "dataQueryOptions": {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.\n* `jsonwrap` - Wrap the output in a JSON object with metadata about the query.\n* `raw` - change the output so that it is aggregatable across multiple such queries. Supported by `/api/v2` data queries and `json2` format.\n* `minify` - Remove unnecessary spaces and newlines from the output.\n* `debug` - Provide additional information in `jsonwrap` output to help tracing issues.\n* `nonzero` - Do not return dimensions that all their values are zero, to improve the visual appearance of charts. They will still be returned if all the dimensions are entirely zero.\n* `null2zero` - Replace `null` values with `0`.\n* `absolute` or `abs` - Traditionally Netdata returns select dimensions negative to improve visual appearance. This option turns this feature off.\n* `display-absolute` - Only used by badges, to do color calculation using the signed value, but render the value without a sign.\n* `flip` or `reversed` - Order the timestamps array in reverse order (newest to oldest).\n* `min2max` - When flattening multi-dimensional data into a single metric format, use `max - min` instead of `sum`. This is EOL - use `/api/v2` to control aggregation across dimensions.\n* `percentage` - Convert all values into a percentage vs the row total. When enabled, Netdata will query all dimensions, even the ones that have not been selected or are hidden, to find the row total, in order to calculate the percentage of each dimension selected.\n* `seconds` - Output timestamps in seconds instead of dates.\n* `milliseconds` or `ms` - Output timestamps in milliseconds instead of dates.\n* `unaligned` - by default queries are aligned to the the view, so that as time passes past data returned do not change. When a data query will not be used for visualization, `unaligned` can be given to avoid aligning the query time-frame for visual precision.\n* `match-ids`, `match-names`. By default filters match both IDs and names when they are available. Setting either of the two options will disable the other.\n* `anomaly-bit` - query the anomaly information instead of metric values. This is EOL, use `/api/v2` and `json2` format which always returns this information and many more.\n* `jw-anomaly-rates` - return anomaly rates as a separate result set in the same `json` format response. This is EOL, use `/api/v2` and `json2` format which always returns information and many more. \n* `details` - `/api/v2/data` returns in `jsonwrap` the full tree of dimensions that have been matched by the query.\n* `group-by-labels` - `/api/v2/data` returns in `jsonwrap` flattened labels per output dimension. These are used to identify the instances that have been aggregated into each dimension, making it possible to provide a map, like Netdata does for Kubernetes.\n* `natural-points` - return timestamps as found in the database. The result is again fixed-step, but the query engine attempts to align them with the timestamps found in the database.\n* `virtual-points` - return timestamps independent of the database alignment. This is needed aggregating data across multiple Netdata agents, to ensure that their outputs do not need to be interpolated to be merged.\n* `selected-tier` - use data exclusively from the selected tier given with the `tier` parameter. This option is set automatically when the `tier` parameter is set.\n* `all-dimensions` - In `/api/v1` `jsonwrap` include metadata for all candidate metrics examined. 
In `/api/v2` this is standard behavior and no option is needed.\n* `label-quotes` - In `csv` output format, enclose each header label in quotes.\n* `objectrows` - Each row of value should be an object, not an array (only for `json` format).\n* `google_json` - Comply with google JSON/JSONP specs (only for `json` format).\n",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "jsonwrap",
+ "raw",
+ "minify",
+ "debug",
+ "nonzero",
+ "null2zero",
+ "abs",
+ "absolute",
+ "display-absolute",
+ "flip",
+ "reversed",
+ "min2max",
+ "percentage",
+ "seconds",
+ "ms",
+ "milliseconds",
+ "unaligned",
+ "match-ids",
+ "match-names",
+ "anomaly-bit",
+ "jw-anomaly-rates",
+ "details",
+ "group-by-labels",
+ "natural-points",
+ "virtual-points",
+ "selected-tier",
+ "all-dimensions",
+ "label-quotes",
+ "objectrows",
+ "google_json"
+ ]
},
- {
- "name": "group_options",
- "in": "query",
- "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
- "required": false,
- "allowEmptyValue": false,
- "schema": {
- "type": "string"
- }
+ "default": [
+ "seconds",
+ "jsonwrap"
+ ]
+ }
+ },
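A sketch of passing several of the options above in one request; for list-typed parameters the agent accepts `|` (or comma) as the separator between values:

    import urllib.request

    # Ask for millisecond timestamps, no dimension sign flipping, wrapped output.
    url = ("http://localhost:19999/api/v1/data"
           "?chart=system.cpu&options=ms|absolute|jsonwrap")
    with urllib.request.urlopen(url) as resp:
        print(resp.read(200))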
+ "dataTimeGroup1": {
+ "name": "group",
+ "in": "query",
+ "description": "Time aggregation function. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. If the `absolute` option is set, the values are turned positive before applying this calculation.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "avg",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ "dataTimeGroup2": {
+ "name": "time_group",
+ "in": "query",
+ "description": "Time aggregation function. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. If the `absolute` option is set, the values are turned positive before applying this calculation.\n",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "avg",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ "dataTimeGroupOptions1": {
+ "name": "group_options",
+ "in": "query",
+ "description": "When the time grouping function supports additional parameters, this field can be used to pass them to it. Currently `countif`, `trimmed-mean`, `trimmed-median` and `percentile` support this. For `countif` the string may start with `<`, `<=`, `<:`, `<>`, `!=`, `>`, `>=`, `>:`. For all others just a number is expected.\n",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ "dataTimeGroupOptions2": {
+ "name": "time_group_options",
+ "in": "query",
+ "description": "When the time grouping function supports additional parameters, this field can be used to pass them to it. Currently `countif`, `trimmed-mean`, `trimmed-median` and `percentile` support this. For `countif` the string may start with `<`, `<=`, `<:`, `<>`, `!=`, `>`, `>=`, `>:`. For all others just a number is expected.\n",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
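A sketch of passing an argument to `countif` through the v1 parameter names described above, counting the points greater than 90:

    import urllib.parse, urllib.request

    qs = urllib.parse.urlencode({
        "chart": "system.cpu",
        "group": "countif",
        "group_options": ">90",   # operator-prefixed argument, per the description above
    })
    with urllib.request.urlopen(f"http://localhost:19999/api/v1/data?{qs}") as resp:
        print(resp.read(200))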
+ "dataTimeResampling1": {
+ "name": "gtime",
+ "in": "query",
+ "description": "The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (ie when the data is per-second, setting gtime=60 will turn them to per-minute).\n",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ "dataTimeResampling2": {
+ "name": "time_resampling",
+ "in": "query",
+ "description": "For incremental values that are \"per second\", this value is used to resample them to \"per minute` (60) or \"per hour\" (3600). It can only be used in conjunction with group=average.\n",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ "timeoutMS": {
+ "name": "timeout",
+ "in": "query",
+ "description": "Specify a timeout value in milliseconds after which the agent will abort the query and return a 503 error. A value of 0 indicates no timeout.\n",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ "timeoutSecs": {
+ "name": "timeout",
+ "in": "query",
+ "description": "Specify a timeout value in seconds after which the agent will abort the query and return a 504 error. A value of 0 indicates no timeout, but some endpoints, like `weights`, do not accept infinite timeouts (they have a predefined default), so to disable the timeout it must be set to a really high value.\n",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ "before": {
+ "name": "before",
+ "in": "query",
+ "description": "`after` and `before` define the time-frame of a query. `before` can be a negative number of seconds, up to 3 years (-94608000), relative to current clock. If not set, it is assumed to be the current clock time. When `before` is positive, it is assumed to be a unix epoch timestamp. When non-data endpoints support the `after` and `before`, they use the time-frame to limit their response for objects having data retention within the time-frame given.\n",
+ "required": false,
+ "schema": {
+ "type": "integer",
+ "default": 0
+ }
+ },
+ "after": {
+ "name": "after",
+ "in": "query",
+ "description": "`after` and `before` define the time-frame of a query. `after` can be a negative number of seconds, up to 3 years (-94608000), relative to `before`. If not set, it is usually assumed to be -600. When non-data endpoints support the `after` and `before`, they use the time-frame to limit their response for objects having data retention within the time-frame given.\n",
+ "required": false,
+ "schema": {
+ "type": "integer",
+ "default": -600
+ }
+ },
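The two conventions above (negative values are relative, positive values are unix epoch timestamps) make the following two queries equivalent when issued at the same moment; a sketch:

    import time, urllib.parse

    now = int(time.time())

    relative = urllib.parse.urlencode({"after": -600})  # before defaults to 0, i.e. now
    absolute = urllib.parse.urlencode({"after": now - 600, "before": now})

    # Both select the same 10-minute window.
    print(relative)
    print(absolute)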
+ "baselineBefore": {
+ "name": "baseline_before",
+ "in": "query",
+ "description": "`baseline_after` and `baseline_before` define the baseline time-frame of a comparative query. `baseline_before` can be a negative number of seconds, up to 3 years (-94608000), relative to current clock. If not set, it is assumed to be the current clock time. When `baseline_before` is positive, it is assumed to be a unix epoch timestamp.\n",
+ "required": false,
+ "schema": {
+ "type": "integer",
+ "default": 0
+ }
+ },
+ "baselineAfter": {
+ "name": "baseline_after",
+ "in": "query",
+ "description": "`baseline_after` and `baseline_before` define the baseline time-frame of a comparative query. `baseline_after` can be a negative number of seconds, up to 3 years (-94608000), relative to `baseline_before`. If not set, it is usually assumed to be -300.\n",
+ "required": false,
+ "schema": {
+ "type": "integer",
+ "default": -600
+ }
+ },
+ "points": {
+ "name": "points",
+ "in": "query",
+ "description": "The number of points to be returned. If not given, or it is <= 0, or it is bigger than the points stored in the database for the given duration, all the available collected values for the given duration will be returned. For `weights` endpoints that do statistical analysis, the `points` define the detail of this analysis (the default is 500).\n",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ "tier": {
+ "name": "tier",
+ "in": "query",
+ "description": "Use only the given dbengine tier for executing the query. Setting this parameters automatically sets the option `selected-tier` for the query.\n",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ "callback": {
+ "name": "callback",
+ "in": "query",
+ "description": "For JSONP responses, the callback function name.\n",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ "filename": {
+ "name": "filename",
+ "in": "query",
+ "description": "Add `Content-Disposition: attachment; filename=` header to the response, that will instruct the browser to save the response with the given filename.\"\n",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ "tqx": {
+ "name": "tqx",
+ "in": "query",
+ "description": "[Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.\n",
+ "required": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ "contextOptions1": {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "full",
+ "all",
+ "charts",
+ "dimensions",
+ "labels",
+ "uuids",
+ "queue",
+ "flags",
+ "deleted",
+ "deepscan"
+ ]
}
- ],
- "responses": {
- "200": {
- "description": "JSON object with weights for each context, chart and dimension.",
- "content": {
- "application/json": {
- "schema": {
- "$ref": "#/components/schemas/weights"
- }
- }
- }
- },
- "400": {
- "description": "The given parameters are invalid."
- },
- "403": {
- "description": "metrics correlations are not enabled on this Netdata Agent."
- },
- "404": {
- "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
- },
- "504": {
- "description": "Timeout - the query took too long and has been cancelled."
+ }
+ },
+ "chart": {
+ "name": "chart",
+ "in": "query",
+ "description": "The id of the chart as returned by the `/api/v1/charts` call.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by `/api/v1/charts`"
+ }
+ },
+ "context": {
+ "name": "context",
+ "in": "query",
+ "description": "The context of the chart as returned by the /charts call.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /charts"
+ }
+ },
+ "dimension": {
+ "name": "dimension",
+ "in": "query",
+ "description": "Zero, one or more dimension ids or names, as returned by the /chart call, separated with comma or pipe. Netdata simple patterns are supported.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "format": "as returned by /charts"
}
}
+ },
+ "dimensions": {
+ "name": "dimensions",
+ "in": "query",
+ "description": "a simple pattern matching dimensions (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ "chart_label_key": {
+ "name": "chart_label_key",
+ "in": "query",
+ "description": "Specify the chart label keys that need to match for context queries as comma separated values. At least one matching key is needed to match the corresponding chart.\n",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "key1,key2,key3"
+ }
+ },
+ "chart_labels_filter": {
+ "name": "chart_labels_filter",
+ "in": "query",
+ "description": "Specify the chart label keys and values to match for context queries. All keys/values need to match for the chart to be included in the query. The labels are specified as key1:value1,key2:value2\n",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "format": "key1:value1,key2:value2,key3:value3"
+ }
+ },
+ "weightMethods": {
+ "name": "method",
+ "in": "query",
+ "description": "The weighting / scoring algorithm.",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "ks2",
+ "volume",
+ "anomaly-rate",
+ "value"
+ ]
+ }
}
- }
- },
- "servers": [
- {
- "url": "https://registry.my-netdata.io/api/v1"
},
- {
- "url": "http://registry.my-netdata.io/api/v1"
- }
- ],
- "components": {
"schemas": {
"info": {
"type": "object",
@@ -2241,7 +2552,7 @@
},
"dimensions": {
"type": "object",
- "description": "An object containing all the chart dimensions available for the chart. This is used as an indexed array. For each pair in the dictionary: the key is the id of the dimension and the value is a dictionary containing the name.",
+ "description": "An object containing all the chart dimensions available for the chart. This is used as an indexed array. For each pair in the dictionary: the key is the id of the dimension and the value is a dictionary containing the name.\"\n",
"additionalProperties": {
"type": "object",
"properties": {
@@ -2423,7 +2734,810 @@
}
}
},
- "data": {
+ "jsonwrap2": {
+ "description": "Data response with `format=json2`\n",
+ "type": "object",
+ "properties": {
+ "api": {
+ "$ref": "#/components/schemas/api"
+ },
+ "agents": {
+ "$ref": "#/components/schemas/agents"
+ },
+ "versions": {
+ "$ref": "#/components/schemas/versions"
+ },
+ "summary": {
+ "description": "Summarized information about nodes, contexts, instances, labels, alerts, and dimensions. The items returned are determined by the scope of the query only, however the statistical data in them are influenced by the filters of the query. Using this information the dashboard allows users to slice and dice the data by filtering and grouping.\n",
+ "type": "object",
+ "properties": {
+ "nodes": {
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/nodeWithDataStatistics"
+ }
+ },
+ "contexts": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "description": "An object describing a unique context. `is` stands for instances, `ds` for dimensions, `al` for alerts, `sts` for statistics.\n",
+ "properties": {
+ "id": {
+ "description": "the context id.",
+ "type": "string"
+ },
+ "is": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "ds": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "al": {
+ "$ref": "#/components/schemas/jsonwrap2_alerts_count"
+ },
+ "sts": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "instances": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "description": "An object describing an instance. `ds` stands for dimensions, `al` for alerts, `sts` for statistics.\n",
+ "properties": {
+ "id": {
+ "description": "the id of the instance.",
+ "type": "string"
+ },
+ "nm": {
+ "description": "the name of the instance (may be absent when it is the same with the id)",
+ "type": "string"
+ },
+ "ni": {
+ "description": "the node index id this instance belongs to. The UI uses this to compone the fully qualified name of the instance, using the node hostname to present it to users and its machine guid to add it to filters."
+ },
+ "ds": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "al": {
+ "$ref": "#/components/schemas/jsonwrap2_alerts_count"
+ },
+ "sts": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "dimensions": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "description": "An object describing a unique dimension. `ds` stands for `dimensions`, `sts` for statistics.\n",
+ "properties": {
+ "id": {
+ "description": "the id of the dimension.",
+ "type": "string"
+ },
+ "nm": {
+ "description": "the name of the dimension (may be absent when it is the same with the id)",
+ "type": "string"
+ },
+ "ds": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "sts": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ }
+ }
+ }
+ },
+ "labels": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "description": "An object describing a label key. `ds` stands for `dimensions`, `sts` for statistics.\n",
+ "properties": {
+ "id": {
+ "description": "the key of the label.",
+ "type": "string"
+ },
+ "ds": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "sts": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ },
+ "vl": {
+ "description": "An array of values for this key.\n",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "description": "The value string",
+ "type": "string"
+ },
+ "ds": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "sts": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "alerts": {
+ "description": "An array of all the unique alerts running, grouped by alert name (`nm` is available here)\n",
+ "type": "array",
+ "items": {
+ "$ref": "#/components/schemas/jsonwrap2_alerts_count"
+ }
+ }
+ }
+ },
+ "totals": {
+ "type": "object",
+ "properties": {
+ "nodes": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "contexts": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "instances": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "dimensions": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "label_keys": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "label_key_values": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ }
+ }
+ },
+ "functions": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "db": {
+ "type": "object",
+ "properties": {
+ "tiers": {
+ "description": "The number of tiers this server is using.\n",
+ "type": "integer"
+ },
+ "update_every": {
+ "description": "The minimum update every, in seconds, for all tiers and all metrics aggregated into this query.\n",
+ "type": "integer"
+ },
+ "first_entry": {
+ "description": "The minimum unix epoch timestamp of the retention across all tiers for all metrics aggregated into this query.\n",
+ "type": "integer"
+ },
+ "last_entry": {
+ "description": "The maximum unix epoch timestamp of the retention across all tier for all metrics aggregated into this query.\n",
+ "type": "integer"
+ },
+ "per_tier": {
+ "description": "An array with information for each of the tiers available, related to this query.\n",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "tier": {
+ "description": "The tier number of this tier, starting at 0.\n",
+ "type": "integer"
+ },
+ "queries": {
+ "description": "The number of queries executed on this tier. Usually one query per metric is made, but the query may cross multiple tier, in which case more than one query per metric is made.\n",
+ "type": "integer"
+ },
+ "points": {
+ "description": "The number of points read from this tier.\n",
+ "type": "integer"
+ },
+ "update_every": {
+ "description": "The minimum resolution of all metrics queried on this tier.\n",
+ "type": "integer"
+ },
+ "first_entry": {
+ "description": "The minimum unix epoch timestamp available across all metrics that used this tier. This reflects the oldest timestamp of the tier's retention.\n",
+ "type": "integer"
+ },
+ "last_entry": {
+ "description": "The maximum unix epoch timestamp available across all metrics that used this tier. This reflects the newest timestamp of the tier's retention.\n"
+ }
+ }
+ }
+ },
+ "units": {
+ "description": "The units of the database data\n",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ ]
+ },
+ "dimensions": {
+ "type": "object",
+ "properties": {
+ "ids": {
+ "description": "An array with the dimension ids that uniquely identify the dimensions for this query. It is the same with `view.dimensions.ids`.\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "units": {
+ "description": "An array with the units each dimension has in the database (independent of group-by aggregation that may override the units).\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "sts": {
+ "description": "Statistics about the data collection points used for each dimension.\n",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ }
+ }
+ }
+ }
+ },
+ "view": {
+ "type": "object",
+ "properties": {
+ "title": {
+ "description": "The title the chart should have.\n",
+ "type": "string"
+ },
+ "format": {
+ "description": "The format the `result` top level member has. Available on when `debug` flag is set.\n",
+ "type": "string"
+ },
+ "options": {
+ "description": "An array presenting all the options given to the query. Available on when `debug` flag is set.\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "time_group": {
+ "description": "The same as the parameter `time_group`. Available on when `debug` flag is set.\n",
+ "type": "string"
+ },
+ "after": {
+ "description": "The oldest unix epoch timestamp of the data returned in the `result`.\n",
+ "type": "integer"
+ },
+ "before": {
+ "description": "The newest unix epoch timestamp of the data returned in the `result`.\n",
+ "type": "integer"
+ },
+ "partial_data_trimming": {
+ "description": "Information related to trimming of the last few points of the `result`, that was required to remove (increasing) partial data.\nTrimming is disabled when the `raw` option is given to the query.\nThis object is available only when the `debug` flag is set.\n",
+ "type": "object",
+ "properties": {
+ "max_update_every": {
+ "description": "The maximum `update_every` for all metrics aggregated into the query.\nTrimming is by default enabled at `view.before - max_update_every`, but only when `view.before >= now - max_update_every`.\n",
+ "type": "integer"
+ },
+ "expected_after": {
+ "description": "The timestamp at which trimming can be enabled.\nIf this timestamp is greater or equal to `view.before`, there is no trimming.\n",
+ "type": "integer"
+ },
+ "trimmed_after": {
+ "description": "The timestamp at which trimming has been applied.\nIf this timestamp is greater or equal to `view.before`, there is no trimming.\n"
+ }
+ }
+ },
+ "points": {
+ "description": "The number of points in `result`. Available only when `raw` is given.\n",
+ "type": "integer"
+ },
+ "units": {
+ "description": "The units of the query.\n",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ ]
+ },
+ "chart_type": {
+ "description": "The default chart type of the query.\n",
+ "type": "string",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ]
+ },
+ "dimensions": {
+ "description": "Detailed information about the chart dimensions included in the `result`.\n",
+ "type": "object",
+ "properties": {
+ "grouped_by": {
+ "description": "An array with the order of the groupings performed.\n",
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "selected",
+ "dimension",
+ "instance",
+ "node",
+ "context",
+ "units",
+ "label:key1",
+ "label:key2",
+ "label:keyN"
+ ]
+ }
+ },
+ "ids": {
+ "description": "An array with the dimension ids that uniquely identify the dimensions for this query.\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "names": {
+ "description": "An array with the dimension names to be presented to users. Names may be overlapping, but IDs are not.\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "priorities": {
+ "description": "An array with the relative priorities of the dimensions.\nNumbers may not be sequential or unique. The application is expected to order by this and then by name.\n",
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ "aggregated": {
+ "description": "An array with the number of source metrics aggregated into each dimension.\n",
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ "units": {
+ "description": "An array with the units each dimension has.\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "sts": {
+ "description": "Statistics about the view points for each dimension.\n",
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ },
+ "labels": {
+ "description": "The labels associated with each dimension in the query.\nThis object is only available when the `group-by-labels` option is given to the query.\n",
+ "type": "object",
+ "properties": {
+ "label_key1": {
+ "description": "An array having one entry for each of the dimensions of the query.\n",
+ "type": "array",
+ "items": {
+ "description": "An array having one entry for each of the values this label key has for the given dimension.\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "min": {
+ "description": "The minimum value of all points included in the `result`.\n",
+ "type": "number"
+ },
+ "max": {
+ "description": "The maximum value of all points included in the `result`.\n",
+ "type": "number"
+ }
+ }
+ },
+ "result": {
+ "$ref": "#/components/schemas/data_json_formats2"
+ },
+ "timings": {
+ "type": "object"
+ }
+ }
+ },
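A sketch of consuming the jsonwrap2 envelope described above, reading a few of the documented members (assuming a local agent serving /api/v2/data):

    import json, urllib.request

    url = ("http://localhost:19999/api/v2/data"
           "?scope_contexts=system.cpu&format=json2")
    with urllib.request.urlopen(url) as resp:
        reply = json.load(resp)

    view = reply["view"]
    print(view["title"], view["units"])          # chart title and units
    print(view["dimensions"]["ids"])             # one id per output dimension
    print(reply["summary"]["nodes"])             # nodeWithDataStatistics objects
    print(reply["totals"].get("contexts", {}))   # jsonwrap2_items_count; absent keys mean zero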
+ "jsonwrap2_sts": {
+ "description": "Statistical values\n",
+ "type": "object",
+ "properties": {
+ "min": {
+ "description": "The minimum value of all metrics aggregated",
+ "type": "number"
+ },
+ "max": {
+ "description": "The maximum value of all metrics aggregated",
+ "type": "number"
+ },
+ "avg": {
+ "description": "The average value of all metrics aggregated",
+ "type": "number"
+ },
+ "arp": {
+ "description": "The average anomaly rate of all metrics aggregated",
+ "type": "number"
+ },
+ "con": {
+ "description": "The contribution percentage of all the metrics aggregated",
+ "type": "number"
+ }
+ }
+ },
+ "jsonwrap2_sts_raw": {
+ "description": "Statistical values when `raw` option is given.\n",
+ "type": "object",
+ "properties": {
+ "min": {
+ "description": "The minimum value of all metrics aggregated",
+ "type": "number"
+ },
+ "max": {
+ "description": "The maximum value of all metrics aggregated",
+ "type": "number"
+ },
+ "sum": {
+ "description": "The sum value of all metrics aggregated",
+ "type": "number"
+ },
+ "ars": {
+ "description": "The sum anomaly rate of all metrics aggregated",
+ "type": "number"
+ },
+ "vol": {
+ "description": "The volume of all the metrics aggregated",
+ "type": "number"
+ },
+ "cnt": {
+ "description": "The count of all metrics aggregated",
+ "type": "integer"
+ }
+ }
+ },
+ "jsonwrap2_items_count": {
+ "description": "Depending on the placement of this object, `items` may be `nodes`, `contexts`, `instances`, `dimensions`, `label keys`, `label key-value pairs`. Furthermore, if the whole object is missing it should be assumed that all its members are zero.\n",
+ "type": "object",
+ "properties": {
+ "sl": {
+ "description": "The number of items `selected` to query. If absent it is zero.",
+ "type": "integer"
+ },
+ "ex": {
+ "description": "The number of items `excluded` from querying. If absent it is zero.",
+ "type": "integer"
+ },
+ "qr": {
+ "description": "The number of items (out of `selected`) the query successfully `queried`. If absent it is zero.",
+ "type": "integer"
+ },
+ "fl": {
+ "description": "The number of items (from `selected`) that `failed` to be queried. If absent it is zero.",
+ "type": "integer"
+ }
+ }
+ },
+ "jsonwrap2_alerts_count": {
+ "description": "Counters about alert statuses. If this object is missing, it is assumed that all its members are zero.\n",
+ "type": "object",
+ "properties": {
+ "nm": {
+ "description": "The name of the alert. Can be absent when the counters refer to more than one alert instances.",
+ "type": "string"
+ },
+ "cl": {
+ "description": "The number of CLEAR alerts. If absent, it is zero.",
+ "type": "integer"
+ },
+ "wr": {
+ "description": "The number of WARNING alerts. If absent, it is zero.",
+ "type": "integer"
+ },
+ "cr": {
+ "description": "The number of CRITICAL alerts. If absent, it is zero.",
+ "type": "integer"
+ },
+ "ot": {
+ "description": "The number of alerts that are not CLEAR, WARNING, CRITICAL (so, they are \"other\"). If absent, it is zero.\n",
+ "type": "integer"
+ }
+ }
+ },
+ "api": {
+ "description": "The version of the API used.",
+ "type": "integer"
+ },
+ "agents": {
+ "description": "An array of agent definitions consulted to compose this response.\n",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "mg": {
+ "description": "The agent machine GUID.",
+ "type": "string",
+ "format": "uuid"
+ },
+ "nd": {
+ "description": "The agent cloud node ID.",
+ "type": "string",
+ "format": "uuid"
+ },
+ "nm": {
+ "description": "The agent hostname.",
+ "type": "string"
+ },
+ "ai": {
+ "description": "The agent index ID for this agent, in this response.",
+ "type": "integer"
+ },
+ "now": {
+ "description": "The current unix epoch timestamp of this agent.",
+ "type": "integer"
+ }
+ }
+ }
+ },
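A sketch of an `agents` array with a single agent; the GUID, hostname and timestamp are made up, and the optional `nd` member is omitted:

    [{"mg": "11111111-2222-3333-4444-555555555555", "nm": "agent.example.com", "ai": 0, "now": 1678886400}]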
+ "versions": {
+ "description": "Hashes that allow the caller to detect important database changes of Netdata agents.\n",
+ "type": "object",
+ "properties": {
+ "nodes_hard_hash": {
+ "description": "An auto-increment value that reflects the number of changes to the number of nodes maintained by the server. Everytime a node is added or removed, this number gets incremented.\n",
+ "type": "integer"
+ },
+ "contexts_hard_hash": {
+ "description": "An auto-increment value that reflects the number of changes to the number of contexts maintained by the server. Everytime a context is added or removed, this number gets incremented.\n",
+ "type": "integer"
+ },
+ "contexts_soft_hash": {
+ "description": "An auto-increment value that reflects the number of changes to the queue that sends contexts updates to Netdata Cloud. Everytime the contents of a context are updated, this number gets incremented.\n",
+ "type": "integer"
+ },
+ "alerts_hard_hash": {
+ "description": "An auto-increment value that reflects the number of changes to the number of alerts. Everytime an alert is added or removed, this number gets incremented.\n",
+ "type": "integer"
+ },
+ "alerts_soft_hash": {
+ "description": "An auto-increment value that reflects the number of alerts transitions. Everytime an alert transitions to a new state, this number gets incremented.\n",
+ "type": "integer"
+ }
+ }
+ },
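A hypothetical `versions` object; the absolute numbers carry no meaning on their own, only their changes do:

    {"nodes_hard_hash": 3, "contexts_hard_hash": 127, "contexts_soft_hash": 56891, "alerts_hard_hash": 58, "alerts_soft_hash": 1204}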
+ "nodeBasic": {
+ "type": "object",
+ "description": "Basic information about a node.",
+ "required": [
+ "ni",
+ "st"
+ ],
+ "properties": {
+ "mg": {
+ "description": "The machine guid of the node. May not be available if the request is served by the Netdata Cloud.",
+ "type": "string",
+ "format": "UUID"
+ },
+ "nd": {
+ "description": "The node id of the node. May not be available if the node is not registered to Netdata Cloud.",
+ "type": "string",
+ "format": "UUID"
+ },
+ "nm": {
+ "description": "The name (hostname) of the node.",
+ "type": "string"
+ },
+ "ni": {
+ "description": "The node index id, a number that uniquely identifies this node for this query.",
+ "type": "integer"
+ },
+ "st": {
+ "description": "Status information about the communication with this node.",
+ "type": "object",
+ "properties": {
+ "ai": {
+ "description": "The agent index id that has been contacted for this node.",
+ "type": "integer"
+ },
+ "code": {
+ "description": "The HTTP response code of the response for this node. When working directly with an agent, this is always 200. If the `code` is missing, it should be assumed to be 200.",
+ "type": "integer"
+ },
+ "msg": {
+ "description": "A human readable description of the error, if any. If `msg` is missing, or is the empty string `\"\"` or is `null`, there is no description associated with the current status.",
+ "type": "string"
+ },
+ "ms": {
+ "description": "The time in milliseconds this node took to respond, or if the local agent responded for this node, the time it needed to execute the query. If `ms` is missing, the time that was required to query this node is unknown.",
+ "type": "number"
+ }
+ }
+ }
+ }
+ },
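A minimal `nodeBasic` object satisfying the required `ni` and `st` members (all identifiers hypothetical; a missing `code` would imply 200):

    {"mg": "66666666-7777-8888-9999-000000000000", "nm": "node1.example.com", "ni": 0, "st": {"ai": 0, "code": 200, "ms": 1.5}}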
+ "nodeWithDataStatistics": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/nodeBasic"
+ },
+ {
+ "type": "object",
+ "description": "`is` stands for instances, `ds` for dimensions, `al` for alerts, `sts` for statistics.\n",
+ "properties": {
+ "is": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "ds": {
+ "$ref": "#/components/schemas/jsonwrap2_items_count"
+ },
+ "al": {
+ "$ref": "#/components/schemas/jsonwrap2_alerts_count"
+ },
+ "sts": {
+ "oneOf": [
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts"
+ },
+ {
+ "$ref": "#/components/schemas/jsonwrap2_sts_raw"
+ }
+ ]
+ }
+ }
+ }
+ ]
+ },
+ "nodeFull": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/nodeBasic"
+ },
+ {
+ "type": "object",
+ "properties": {
+ "version": {
+ "description": "The version of the Netdata Agent the node runs.",
+ "type": "string"
+ },
+ "hops": {
+ "description": "How many hops away from the origin node, the queried one is. 0 means the agent itself is the origin node.",
+ "type": "integer"
+ },
+ "state": {
+ "description": "The current state of the node on this agent.",
+ "type": "string",
+ "enum": [
+ "reachable",
+ "stale",
+ "offline"
+ ]
+ }
+ }
+ }
+ ]
+ },
+ "context2Basic": {
+ "type": "object",
+ "properties": {
+ "family": {
+ "type": "string"
+ },
+ "priority": {
+ "type": "integer"
+ },
+ "first_entry": {
+ "type": "integer"
+ },
+ "last_entry": {
+ "type": "integer"
+ },
+ "live": {
+ "type": "boolean"
+ }
+ }
+ },
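An illustrative `context2Basic` entry (the timestamps are hypothetical Unix epochs):

    {"family": "cpu", "priority": 100, "first_entry": 1678800000, "last_entry": 1678886400, "live": true}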
+ "contexts2": {
+ "description": "`/api/v2/contexts` and `/api/v2/q` response about multi-node contexts hosted by a Netdata agent.\n",
+ "type": "object",
+ "properties": {
+ "api": {
+ "$ref": "#/components/schemas/api"
+ },
+ "agents": {
+ "$ref": "#/components/schemas/agents"
+ },
+ "versions": {
+ "$ref": "#/components/schemas/versions"
+ },
+ "contexts": {
+ "additionalProperties": {
+ "$ref": "#/components/schemas/context2Basic"
+ }
+ }
+ }
+ },
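Putting the pieces together, a minimal `contexts2` response could look like this sketch (all identifiers and values hypothetical; `versions` abridged):

    {
      "api": 2,
      "agents": [{"mg": "11111111-2222-3333-4444-555555555555", "nm": "agent.example.com", "ai": 0, "now": 1678886400}],
      "versions": {"contexts_hard_hash": 127, "contexts_soft_hash": 56891},
      "contexts": {"system.cpu": {"family": "cpu", "priority": 100, "first_entry": 1678800000, "last_entry": 1678886400, "live": true}}
    }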
+ "jsonwrap1": {
"type": "object",
"discriminator": {
"propertyName": "format"
@@ -2432,7 +3546,7 @@
"properties": {
"api": {
"type": "number",
- "description": "The API version this conforms to, currently 1."
+ "description": "The API version this conforms to."
},
"id": {
"type": "string",
@@ -2519,149 +3633,173 @@
"additionalProperties": {
"$ref": "#/components/schemas/chart_variables"
}
+ },
+ "result": {
+ "$ref": "#/components/schemas/data_json_formats1"
}
}
},
- "data_json": {
- "description": "Data response in json format.",
- "allOf": [
+ "data_json_formats1": {
+ "description": "Depending on the `format` given to a data query, any of the following may be returned.\n",
+ "oneOf": [
{
- "$ref": "#/components/schemas/data"
+ "$ref": "#/components/schemas/data_json"
},
{
- "properties": {
- "result": {
- "type": "object",
- "properties": {
- "labels": {
- "description": "The dimensions retrieved from the chart.",
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "data": {
- "description": "The data requested, one element per sample with each element containing the values of the dimensions described in the labels value.",
- "type": "array",
- "items": {
- "type": "number"
- }
- }
- },
- "description": "The result requested, in the format requested."
- }
- }
+ "$ref": "#/components/schemas/data_datatable"
+ },
+ {
+ "$ref": "#/components/schemas/data_csvjsonarray"
+ },
+ {
+ "$ref": "#/components/schemas/data_array"
+ },
+ {
+ "$ref": "#/components/schemas/data_txt"
}
]
},
- "data_flat": {
- "description": "Data response in csv / tsv / tsv-excel / ssv / ssv-comma / markdown / html formats.",
- "allOf": [
+ "data_json_formats2": {
+ "description": "Depending on the `format` given to a data query, any of the following may be returned.\n",
+ "oneOf": [
{
- "$ref": "#/components/schemas/data"
+ "$ref": "#/components/schemas/data_json2"
},
{
- "properties": {
- "result": {
- "type": "string"
- }
- }
+ "$ref": "#/components/schemas/data_json_formats1"
}
]
},
- "data_array": {
- "description": "Data response in array format.",
- "allOf": [
- {
- "$ref": "#/components/schemas/data"
+ "data_json2": {
+ "type": "object",
+ "properties": {
+ "labels": {
+ "description": "The IDs of the dimensions returned. The first is always `time`.\n",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
},
- {
+ "point": {
+ "description": "The format of each point returned.\n",
+ "type": "object",
"properties": {
- "result": {
- "type": "array",
- "items": {
- "type": "number"
- }
+ "value": {
+ "description": "The index of the value in each point.\n",
+ "type": "integer"
+ },
+ "arp": {
+ "description": "The index of the anomaly rate in each point.\n",
+ "type": "integer"
+ },
+ "pa": {
+ "description": "The index of the point annotations in each point.\nThis is a bitmap. `EMPTY = 1`, `RESET = 2`, `PARTIAL = 4`.\n`EMPTY` means the point has no value.\n`RESET` means that at least one metric aggregated experienced an overflow (a counter that wrapped).\n`PARTIAL` means that this point should have more metrics aggregated into it, but not all metrics had data.\n",
+ "type": "integer"
+ },
+ "count": {
+ "description": "The number of metrics aggregated into this point. This exists only when the option `raw` is given to the query.\n",
+ "type": "integer"
}
}
+ },
+ "data": {
+ "type": "array",
+ "items": {
+ "allOf": [
+ {
+ "type": "integer"
+ },
+ {
+ "type": "array"
+ }
+ ]
+ }
}
- ]
+ }
},
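A sketch of a `data_json2` payload: with the `point` map below, each point is the triplet [value, anomaly rate, annotations], and each `data` row starts with the timestamp (dimension names and values hypothetical):

    {
      "labels": ["time", "user", "system"],
      "point": {"value": 0, "arp": 1, "pa": 2},
      "data": [
        [1678886400, [2.1, 0.0, 0], [1.0, 0.0, 0]],
        [1678886390, [2.3, 0.0, 0], [0.9, 0.0, 0]]
      ]
    }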
- "data_csvjsonarray": {
- "description": "Data response in csvjsonarray format.",
- "allOf": [
- {
- "$ref": "#/components/schemas/data"
+ "data_json": {
+ "description": "Data response in `json` format.",
+ "type": "object",
+ "properties": {
+ "labels": {
+ "description": "The dimensions retrieved from the chart.",
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
},
- {
- "properties": {
- "result": {
- "description": "The first inner array contains strings showing the labels of each column, each subsequent array contains the values for each point in time.",
- "type": "array",
- "items": {
- "type": "array",
- "items": {}
- }
- }
+ "data": {
+ "description": "The data requested, one element per sample with each element containing the values of the dimensions described in the labels value.\n",
+ "type": "array",
+ "items": {
+ "type": "number"
}
}
- ]
+ }
+ },
+ "data_txt": {
+ "description": "Data response in `csv`, `tsv`, `tsv-excel`, `ssv`, `ssv-comma`, `markdown`, `html` formats.\n",
+ "type": "string"
+ },
+ "data_array": {
+ "description": "Data response in `array` format.",
+ "type": "array",
+ "items": {
+ "type": "number"
+ }
+ },
+ "data_csvjsonarray": {
+ "description": "The first inner array contains strings showing the labels of each column, each subsequent array contains the values for each point in time.\n",
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {}
+ }
},
"data_datatable": {
- "description": "Data response in datatable / datasource formats (suitable for Google Charts).",
- "allOf": [
- {
- "$ref": "#/components/schemas/data"
+ "description": "Data response in datatable / datasource formats (suitable for Google Charts).\n",
+ "type": "object",
+ "properties": {
+ "cols": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "description": "Always empty - for future use."
+ },
+ "label": {
+ "description": "The dimension returned from the chart."
+ },
+ "pattern": {
+ "description": "Always empty - for future use."
+ },
+ "type": {
+ "description": "The type of data in the column / chart-dimension."
+ },
+ "p": {
+ "description": "Contains any annotations for the column."
+ }
+ },
+ "required": [
+ "id",
+ "label",
+ "pattern",
+ "type"
+ ]
+ }
},
- {
- "properties": {
- "result": {
- "type": "object",
- "properties": {
- "cols": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "id": {
- "description": "Always empty - for future use."
- },
- "label": {
- "description": "The dimension returned from the chart."
- },
- "pattern": {
- "description": "Always empty - for future use."
- },
- "type": {
- "description": "The type of data in the column / chart-dimension."
- },
- "p": {
- "description": "Contains any annotations for the column."
- }
- },
- "required": [
- "id",
- "label",
- "pattern",
- "type"
- ]
- }
- },
- "rows": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "c": {
- "type": "array",
- "items": {
- "properties": {
- "v": {
- "description": "Each value in the row is represented by an object named `c` with five v fields: data, null, null, 0, the value. This format is fixed by the Google Charts API."
- }
- }
- }
- }
+ "rows": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "c": {
+ "type": "array",
+ "items": {
+ "properties": {
+ "v": {
+ "description": "Each value in the row is represented by an object named `c` with five v fields: data, null, null, 0, the value. This format is fixed by the Google Charts API.\"\n"
}
}
}
@@ -2669,7 +3807,7 @@
}
}
}
- ]
+ }
},
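An abridged `data_datatable` sketch with one column pair and one row; the `Date(...)` string follows the Google Charts convention, and all values are hypothetical:

    {
      "cols": [
        {"id": "", "label": "time", "pattern": "", "type": "datetime"},
        {"id": "", "label": "user", "pattern": "", "type": "number"}
      ],
      "rows": [
        {"c": [{"v": "Date(2023,2,15,13,0,0)"}, {"v": 2.1}]}
      ]
    }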
"alarms": {
"type": "object",
@@ -3009,7 +4147,7 @@
"properties": {
"aclk-available": {
"type": "string",
- "description": "Describes whether this agent is capable of connection to the Cloud. False means agent has been built without ACLK component either on purpose (user choice) or due to missing dependency."
+ "description": "Describes whether this agent is capable of connection to the Cloud. False means agent has been built without ACLK component either on purpose (user choice) or due to missing dependency.\n"
},
"aclk-version": {
"type": "integer",
@@ -3165,6 +4303,9 @@
}
}
},
+ "weights2": {
+ "type": "object"
+ },
"weights": {
"type": "object",
"properties": {
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
index fced6544f..c25f0b719 100644
--- a/web/api/netdata-swagger.yaml
+++ b/web/api/netdata-swagger.yaml
@@ -2,11 +2,124 @@ openapi: 3.0.0
info:
title: Netdata API
description: Real-time performance and health monitoring.
- version: 1.33.1
+ version: "1.38"
+ contact:
+ name: Netdata Agent API
+ email: info@netdata.cloud
+ url: https://netdata.cloud
+ license:
+ name: GPL v3+
+ url: https://github.com/netdata/netdata/blob/master/LICENSE
+servers:
+ - url: https://registry.my-netdata.io
+ - url: http://registry.my-netdata.io
+ - url: http://localhost:19999
+tags:
+ - name: nodes
+ description: Everything related to monitored nodes
+ - name: charts
+ description: Everything related to chart instances - DO NOT USE IN NEW CODE - use contexts instead
+ - name: contexts
+ description: Everything related to contexts - in new code, use this instead of charts
+ - name: data
+ description: Everything related to data queries
+ - name: badges
+ description: Everything related to dynamic badges based on metric data
+ - name: weights
+ description: Everything related to scoring / weighting metrics
+ - name: functions
+ description: Everything related to functions
+ - name: alerts
+ description: Everything related to alerts
+ - name: management
+ description: Everything related to managing netdata agents
paths:
- /info:
+ /api/v2/nodes:
+ get:
+ operationId: getNodes2
+ tags:
+ - nodes
+ summary: Nodes Info v2
+ description: |
+ Get a list of all nodes hosted by this Netdata agent.
+ parameters:
+ - $ref: '#/components/parameters/scopeNodes'
+ - $ref: '#/components/parameters/scopeContexts'
+ - $ref: '#/components/parameters/filterNodes'
+ - $ref: '#/components/parameters/filterContexts'
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ description: |
+ `/api/v2/nodes` response for all nodes hosted by a Netdata agent.
+ type: object
+ properties:
+ api:
+ $ref: '#/components/schemas/api'
+ agents:
+ $ref: '#/components/schemas/agents'
+ versions:
+ $ref: '#/components/schemas/versions'
+ nodes:
+ type: array
+ items:
+ $ref: '#/components/schemas/nodeFull'
+ /api/v2/contexts:
+ get:
+ operationId: getContexts2
+ tags:
+ - contexts
+ summary: Contexts Info v2
+ description: |
+ Get a list of all contexts, across all nodes, hosted by this Netdata agent.
+ parameters:
+ - $ref: '#/components/parameters/scopeNodes'
+ - $ref: '#/components/parameters/scopeContexts'
+ - $ref: '#/components/parameters/filterNodes'
+ - $ref: '#/components/parameters/filterContexts'
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/contexts2'
+ /api/v2/q:
get:
- summary: Get netdata basic information
+ operationId: q2
+ tags:
+ - contexts
+ summary: Full Text Search v2
+ description: |
+ Get a list of contexts, across all nodes, hosted by this Netdata agent, matching a string expression.
+ parameters:
+ - name: q
+ in: query
+ description: The strings to search for, formatted as a simple pattern
+ required: true
+ schema:
+ type: string
+ format: simple pattern
+ - $ref: '#/components/parameters/scopeNodes'
+ - $ref: '#/components/parameters/scopeContexts'
+ - $ref: '#/components/parameters/filterNodes'
+ - $ref: '#/components/parameters/filterContexts'
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/contexts2'
+ /api/v1/info:
+ get:
+ operationId: getNodeInfo1
+ tags:
+ - nodes
+ summary: Node Info v1
description: |
The info endpoint returns basic information about netdata. It provides:
* netdata version
@@ -28,9 +141,12 @@ paths:
$ref: "#/components/schemas/info"
"503":
description: netdata daemon not ready (used for health checks).
- /charts:
+ /api/v1/charts:
get:
- summary: Get a list of all charts available at the server
+ operationId: getNodeCharts1
+ tags:
+ - charts
+ summary: List all charts v1 - EOL
description: The charts endpoint returns a summary about all charts stored in the
netdata server.
responses:
@@ -40,19 +156,15 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/chart_summary"
- /chart:
+ /api/v1/chart:
get:
- summary: Get info about a specific chart
+ operationId: getNodeChart1
+ tags:
+ - charts
+ summary: Get one chart v1 - EOL
description: The chart endpoint returns detailed information about a chart.
parameters:
- - name: chart
- in: query
- description: The id of the chart as returned by the /charts call.
- required: true
- schema:
- type: string
- format: as returned by /charts
- default: system.cpu
+ - $ref: '#/components/parameters/chart'
responses:
"200":
description: A javascript object with detailed information about the chart.
@@ -64,70 +176,21 @@ paths:
description: No chart id was supplied in the request.
"404":
description: No chart with the given id is found.
- /contexts:
+ /api/v1/contexts:
get:
- summary: Get a list of all contexts available at the server
+ operationId: getNodeContexts1
+ tags:
+ - contexts
+ summary: Get a list of all node contexts available v1
description: The contexts endpoint returns a summary about all contexts stored in the
netdata server.
parameters:
- - name: options
- in: query
- description: Options that affect data generation.
- required: false
- allowEmptyValue: true
- schema:
- type: array
- items:
- type: string
- enum:
- - full
- - all
- - charts
- - dimensions
- - labels
- - uuids
- - queue
- - flags
- - deleted
- - deepscan
- default:
- - full
- - name: after
- in: query
- description: limit the results on context having data after this timestamp.
- required: false
- schema:
- type: number
- format: integer
- - name: before
- in: query
- description: limit the results on context having data before this timestamp.
- required: false
- schema:
- type: number
- format: integer
- - name: chart_label_key
- in: query
- description: a simple pattern matching charts label keys (use comma or pipe as separator)
- required: false
- allowEmptyValue: true
- schema:
- type: string
- - name: chart_labels_filter
- in: query
- description: "a simple pattern matching charts label key and values (use colon for equality, comma or pipe
- as separator)"
- required: false
- allowEmptyValue: true
- schema:
- type: string
- - name: dimensions
- in: query
- description: a simple pattern matching dimensions (use comma or pipe as separator)
- required: false
- allowEmptyValue: true
- schema:
- type: string
+ - $ref: '#/components/parameters/dimensions'
+ - $ref: '#/components/parameters/chart_label_key'
+ - $ref: '#/components/parameters/chart_labels_filter'
+ - $ref: '#/components/parameters/contextOptions1'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
responses:
"200":
description: An array of contexts.
@@ -135,404 +198,348 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/context_summary"
- /context:
+ /api/v1/context:
get:
+ operationId: getNodeContext1
+ tags:
+ - contexts
summary: Get info about a specific context
- description: The context endpoint returns detailed information about a given context.
+ description: |
+ The context endpoint returns detailed information about a given context.
+ The `context` parameter is required for this call.
parameters:
- - name: context
- in: query
- description: The id of the context as returned by the /contexts call.
- required: true
- schema:
- type: string
- format: as returned by /contexts
- default: system.cpu
- - name: options
+ - $ref: '#/components/parameters/context'
+ - $ref: '#/components/parameters/dimensions'
+ - $ref: '#/components/parameters/chart_label_key'
+ - $ref: '#/components/parameters/chart_labels_filter'
+ - $ref: '#/components/parameters/contextOptions1'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
+ responses:
+ "200":
+ description: A javascript object with detailed information about the context.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/context"
+ "400":
+ description: No context id was supplied in the request.
+ "404":
+ description: No context with the given id is found.
+ /api/v2/data:
+ get:
+ operationId: dataQuery2
+ tags:
+ - data
+ summary: Data Query v2
+ description: |
+ Multi-node, multi-context, multi-instance, multi-dimension data queries, with time and metric aggregation.
+ parameters:
+ - name: group_by
in: query
- description: Options that affect data generation.
+ description: |
+ A comma-separated list of the groupings required.
+ All possible values can be combined together, except `selected`. If `selected` is given in the list, all others are ignored.
+ The order they are placed in the list is currently ignored.
required: false
- allowEmptyValue: true
schema:
type: array
items:
type: string
enum:
- - full
- - all
- - charts
- - dimensions
- - labels
- - uuids
- - queue
- - flags
- - deleted
- - deepscan
+ - dimension
+ - instance
+ - percentage-of-instance
+ - label
+ - node
+ - context
+ - units
+ - selected
default:
- - full
- - name: after
- in: query
- description: limit the results on context having data after this timestamp.
- required: false
- schema:
- type: number
- format: integer
- - name: before
+ - dimension
+ - name: group_by_label
in: query
- description: limit the results on context having data before this timestamp.
+ description: |
+ A comma-separated list of label keys whose values will be used for grouping. The order of the labels in the list is respected.
required: false
schema:
- type: number
- format: integer
- - name: chart_label_key
- in: query
- description: a simple pattern matching charts label keys (use comma or pipe as separator)
- required: false
- allowEmptyValue: true
- schema:
type: string
- - name: chart_labels_filter
+ format: comma-separated list of label keys to group by
+ default: ""
+ - name: aggregation
in: query
- description: "a simple pattern matching charts label key and values (use colon for equality, comma or pipe
- as separator)"
+ description: |
+ The aggregation function to apply when grouping metrics together.
+ When option `raw` is given, `average` and `avg` behave like `sum` and the caller is expected to calculate the average.
required: false
- allowEmptyValue: true
- schema:
- type: string
- - name: dimensions
- in: query
- description: a simple pattern matching dimensions (use comma or pipe as separator)
- required: false
- allowEmptyValue: true
schema:
type: string
+ enum:
+ - min
+ - max
+ - avg
+ - average
+ - sum
+ default: average
+ - $ref: '#/components/parameters/scopeNodes'
+ - $ref: '#/components/parameters/scopeContexts'
+ - $ref: '#/components/parameters/filterNodes'
+ - $ref: '#/components/parameters/filterContexts'
+ - $ref: '#/components/parameters/filterInstances'
+ - $ref: '#/components/parameters/filterLabels'
+ - $ref: '#/components/parameters/filterAlerts'
+ - $ref: '#/components/parameters/filterDimensions'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
+ - $ref: '#/components/parameters/points'
+ - $ref: '#/components/parameters/tier'
+ - $ref: '#/components/parameters/dataQueryOptions'
+ - $ref: '#/components/parameters/dataTimeGroup2'
+ - $ref: '#/components/parameters/dataTimeGroupOptions2'
+ - $ref: '#/components/parameters/dataTimeResampling2'
+ - $ref: '#/components/parameters/dataFormat2'
+ - $ref: '#/components/parameters/timeoutMS'
+ - $ref: '#/components/parameters/callback'
+ - $ref: '#/components/parameters/filename'
+ - $ref: '#/components/parameters/tqx'
responses:
"200":
- description: A javascript object with detailed information about the context.
+ description: |
+ The call was successful. The response includes the data in the format requested.
content:
application/json:
schema:
- $ref: "#/components/schemas/context"
+ oneOf:
+ - $ref: '#/components/schemas/jsonwrap2'
+ - $ref: '#/components/schemas/data_json_formats2'
+ text/plain:
+ schema:
+ type: string
+ format: according to the format requested.
+ text/html:
+ schema:
+ type: string
+ format: html
+ application/x-javascript:
+ schema:
+ type: string
+ format: javascript
"400":
- description: No context id was supplied in the request.
- "404":
- description: No context with the given id is found.
- /alarm_variables:
+ description: |
+ Bad request - the body will include a message stating what is wrong.
+ "500":
+ description: |
+ Internal server error. This usually means the server is out of memory.
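To make the v2 query surface concrete: assuming the referenced parameters use the conventional `scope_contexts`, `after`, `points` and `format` names (they are defined outside this hunk), a request such as `/api/v2/data?scope_contexts=system.cpu&group_by=dimension&aggregation=average&after=-600&points=2&format=json2` would return a `jsonwrap2` object whose abridged shape could be (values hypothetical):

    {
      "api": 2,
      "result": {
        "labels": ["time", "user", "system"],
        "point": {"value": 0, "arp": 1, "pa": 2},
        "data": [[1678886400, [2.1, 0.0, 0], [1.0, 0.0, 0]]]
      },
      "timings": {}
    }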
+ /api/v1/data:
get:
- summary: List variables available to configure alarms for a chart
- description: Returns the basic information of a chart and all the variables that can
- be used in alarm and template health configurations for the particular
- chart or family.
+ operationId: dataQuery1
+ tags:
+ - data
+ summary: Data Query v1 - Single node, single chart or context queries, without group-by.
+ description: |
+ Query metric data of a chart or context of a node and return a dataset having time-series data for all dimensions available.
+ For group-by functionality, use `/api/v2/data`.
+ At least a `chart` or a `context` has to be given for the data query to be executed.
parameters:
- - name: chart
- in: query
- description: The id of the chart as returned by the /charts call.
- required: true
- schema:
- type: string
- format: as returned by /charts
- default: system.cpu
+ - $ref: '#/components/parameters/chart'
+ - $ref: '#/components/parameters/context'
+ - $ref: '#/components/parameters/dimension'
+ - $ref: '#/components/parameters/chart_label_key'
+ - $ref: '#/components/parameters/chart_labels_filter'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
+ - $ref: '#/components/parameters/points'
+ - $ref: '#/components/parameters/tier'
+ - $ref: '#/components/parameters/dataQueryOptions'
+ - $ref: '#/components/parameters/dataFormat1'
+ - $ref: '#/components/parameters/dataTimeGroup1'
+ - $ref: '#/components/parameters/dataTimeGroupOptions1'
+ - $ref: '#/components/parameters/dataTimeResampling1'
+ - $ref: '#/components/parameters/timeoutMS'
+ - $ref: '#/components/parameters/callback'
+ - $ref: '#/components/parameters/filename'
+ - $ref: '#/components/parameters/tqx'
responses:
"200":
- description: A javascript object with information about the chart and the
- available variables.
+ description: |
+ The call was successful. The response includes the data in the format requested.
content:
application/json:
schema:
- $ref: "#/components/schemas/alarm_variables"
+ oneOf:
+ - $ref: '#/components/schemas/jsonwrap1'
+ - $ref: '#/components/schemas/data_json_formats1'
+ text/plain:
+ schema:
+ type: string
+ format: according to the format requested.
+ text/html:
+ schema:
+ type: string
+ format: html
+ application/x-javascript:
+ schema:
+ type: string
+ format: javascript
"400":
description: Bad request - the body will include a message stating what is wrong.
"404":
- description: No chart with the given id is found.
+ description: Chart or context is not found. The supplied chart or context will be reported.
"500":
description: Internal server error. This usually means the server is out of
memory.
- /data:
+ /api/v1/allmetrics:
get:
- summary: Get collected data for a specific chart
- description: The data endpoint returns data stored in the round robin database of a
- chart.
+ operationId: allMetrics1
+ tags:
+ - data
+ summary: All Metrics v1 - Fetch latest value for all metrics
+ description: |
+ The `allmetrics` endpoint returns the latest value of all metrics maintained for a netdata node.
parameters:
- - name: chart
+ - name: format
in: query
- description: The id of the chart as returned by the /charts call. Note chart or context must be specified
- required: false
- allowEmptyValue: false
+ description: The format of the response to be returned.
+ required: true
schema:
type: string
- format: as returned by /charts
- default: system.cpu
- - name: context
+ enum:
+ - shell
+ - prometheus
+ - prometheus_all_hosts
+ - json
+ default: shell
+ - name: filter
in: query
- description: The context of the chart as returned by the /charts call. Note chart or context must be specified
+ description: Allows filtering charts out using simple patterns.
required: false
- allowEmptyValue: false
schema:
type: string
- format: as returned by /charts
- - name: dimension
- in: query
- description: Zero, one or more dimension ids or names, as returned by the /chart
- call, separated with comma or pipe. Netdata simple patterns are
- supported.
- required: false
- allowEmptyValue: false
- schema:
- type: array
- items:
- type: string
- format: as returned by /charts
- - name: after
- in: query
- description: "This parameter can either be an absolute timestamp specifying the
- starting point of the data to be returned, or a relative number of
- seconds (negative, relative to parameter: before). Netdata will
- assume it is a relative number if it is less that 3 years (in seconds).
- If not specified the default is -600 seconds. Netdata will adapt this
- parameter to the boundaries of the round robin database unless the allow_past
- option is specified."
- required: true
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: -600
- - name: before
+ format: any text
+ - name: variables
in: query
- description: This parameter can either be an absolute timestamp specifying the
- ending point of the data to be returned, or a relative number of
- seconds (negative), relative to the last collected timestamp.
- Netdata will assume it is a relative number if it is less than 3
- years (in seconds). Netdata will adapt this parameter to the
- boundaries of the round robin database. The default is zero (i.e.
- the timestamp of the last value collected).
+ description: |
+ When enabled, netdata will expose various system configuration variables.
required: false
schema:
- type: number
- format: integer
- default: 0
- - name: points
- in: query
- description: The number of points to be returned. If not given, or it is <= 0, or
- it is bigger than the points stored in the round robin database for
- this chart for the given duration, all the available collected
- values for the given duration will be returned.
- required: true
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: 20
- - name: chart_label_key
- in: query
- description: Specify the chart label keys that need to match for context queries as comma separated values.
- At least one matching key is needed to match the corresponding chart.
- required: false
- allowEmptyValue: false
- schema:
type: string
- format: key1,key2,key3
- - name: chart_labels_filter
+ enum:
+ - yes
+ - no
+ default: no
+ - name: help
in: query
- description: Specify the chart label keys and values to match for context queries. All keys/values need to
- match for the chart to be included in the query. The labels are specified as key1:value1,key2:value2
+ description: |
+ Enable or disable HELP lines in prometheus output.
required: false
- allowEmptyValue: false
schema:
type: string
- format: key1:value1,key2:value2,key3:value3
- - name: group
+ enum:
+ - yes
+ - no
+ default: no
+ - name: types
in: query
- description: The grouping method. If multiple collected values are to be grouped
- in order to return fewer points, this parameters defines the method
- of grouping. methods supported "min", "max", "average", "sum",
- "incremental-sum". "max" is actually calculated on the absolute
- value collected (so it works for both positive and negative
- dimensions to return the most extreme value in either direction).
- required: true
- allowEmptyValue: false
+ description: |
+ Enable or disable TYPE lines in prometheus output.
+ required: false
schema:
type: string
enum:
- - min
- - max
- - average
- - median
- - stddev
- - sum
- - incremental-sum
- - ses
- - des
- - cv
- - countif
- - percentile
- - percentile25
- - percentile50
- - percentile75
- - percentile80
- - percentile90
- - percentile95
- - percentile97
- - percentile98
- - percentile99
- - trimmed-mean
- - trimmed-mean1
- - trimmed-mean2
- - trimmed-mean3
- - trimmed-mean5
- - trimmed-mean10
- - trimmed-mean15
- - trimmed-mean20
- - trimmed-mean25
- - trimmed-median
- - trimmed-median1
- - trimmed-median2
- - trimmed-median3
- - trimmed-median5
- - trimmed-median10
- - trimmed-median15
- - trimmed-median20
- - trimmed-median25
- default: average
- - name: group_options
+ - yes
+ - no
+ default: no
+ - name: timestamps
in: query
- description: When the group function supports additional parameters, this field
- can be used to pass them to it. Currently only "countif" supports this.
+ description: |
+ Enable or disable timestamps in prometheus output.
required: false
- allowEmptyValue: false
schema:
type: string
- - name: gtime
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: names
in: query
- description: The grouping number of seconds. This is used in conjunction with
- group=average to change the units of metrics (ie when the data is
- per-second, setting gtime=60 will turn them to per-minute).
+ description: |
+ When enabled, netdata will report dimension names. When disabled, netdata will report dimension IDs. The default is controlled in netdata.conf.
required: false
- allowEmptyValue: false
schema:
- type: number
- format: integer
- default: 0
- - name: timeout
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: oldunits
in: query
- description: Specify a timeout value in milliseconds after which the agent will
- abort the query and return a 503 error. A value of 0 indicates no timeout.
+ description: |
+ When enabled, netdata will show metric names for the default `source=average` as they appeared before 1.12, by using the legacy unit naming conventions.
required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: 0
- - name: format
- in: query
- description: The format of the data to be returned.
- required: true
- allowEmptyValue: false
schema:
type: string
enum:
- - json
- - jsonp
- - csv
- - tsv
- - tsv-excel
- - ssv
- - ssvcomma
- - datatable
- - datasource
- - html
- - markdown
- - array
- - csvjsonarray
- default: json
- - name: options
+ - yes
+ - no
+ default: yes
+ - name: hideunits
in: query
- description: Options that affect data generation.
+ description: |
+ When enabled, netdata will not include the units in the metric names, for the default `source=average`.
required: false
- allowEmptyValue: false
schema:
- type: array
- items:
- type: string
- enum:
- - nonzero
- - flip
- - jsonwrap
- - min2max
- - seconds
- - milliseconds
- - abs
- - absolute
- - absolute-sum
- - null2zero
- - objectrows
- - google_json
- - percentage
- - unaligned
- - match-ids
- - match-names
- - allow_past
- - anomaly-bit
- default:
- - seconds
- - jsonwrap
- - name: callback
+ type: string
+ enum:
+ - yes
+ - no
+ default: yes
+ - name: server
in: query
- description: For JSONP responses, the callback function name.
+ description: |
+ Set a distinct name of the client querying prometheus metrics. Netdata will use the client IP if this is not set.
required: false
- allowEmptyValue: true
schema:
type: string
- - name: filename
+ format: any text
+ - name: prefix
in: query
- description: "Add Content-Disposition: attachment; filename= header to
- the response, that will instruct the browser to save the response
- with the given filename."
+ description: |
+ Prefix all prometheus metrics with this string.
required: false
- allowEmptyValue: true
schema:
type: string
- - name: tqx
+ format: any text
+ - name: data
in: query
- description: "[Google Visualization
- API](https://developers.google.com/chart/interactive/docs/dev/imple\
- menting_data_source?hl=en) formatted parameter."
+ description: |
+ Select the prometheus response data source. There is a setting in netdata.conf for the default.
required: false
- allowEmptyValue: true
schema:
type: string
+ enum:
+ - as-collected
+ - average
+ - sum
+ default: average
responses:
"200":
- description: The call was successful. The response includes the data in the
- format requested. Swagger2.0 does not process the discriminator
- field to show polymorphism. The response will be one of the
- sub-types of the data-schema according to the chosen format, e.g.
- json -> data_json.
- content:
- application/json:
- schema:
- $ref: "#/components/schemas/data"
+ description: All the metrics returned in the format requested.
"400":
- description: Bad request - the body will include a message stating what is wrong.
- "404":
- description: Chart or context is not found. The supplied chart or context will be reported.
- "500":
- description: Internal server error. This usually means the server is out of
- memory.
- /badge.svg:
+ description: The format requested is not supported.
+ /api/v1/badge.svg:
get:
+ operationId: badge1
+ tags:
+ - badges
summary: Generate a badge in form of SVG image for a chart (or dimension)
description: Successful responses are SVG images.
parameters:
- - name: chart
- in: query
- description: The id of the chart as returned by the /charts call.
- required: true
- allowEmptyValue: false
- schema:
- type: string
- format: as returned by /charts
- default: system.cpu
+ - $ref: '#/components/parameters/chart'
+ - $ref: '#/components/parameters/dimension'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
+ - $ref: '#/components/parameters/dataTimeGroup1'
+ - $ref: '#/components/parameters/dataQueryOptions'
- name: alarm
in: query
description: The name of an alarm linked to the chart.
@@ -541,120 +548,6 @@ paths:
schema:
type: string
format: any text
- - name: dimension
- in: query
- description: Zero, one or more dimension ids, as returned by the /chart call.
- required: false
- allowEmptyValue: false
- schema:
- type: array
- items:
- type: string
- format: as returned by /charts
- - name: after
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- starting point of the data to be returned, or a relative number of
- seconds, to the last collected timestamp. Netdata will assume it is
- a relative number if it is smaller than the duration of the round
- robin database for this chart. So, if the round robin database is
- 3600 seconds, any value from -3600 to 3600 will trigger relative
- arithmetics. Netdata will adapt this parameter to the boundaries of
- the round robin database.
- required: true
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: -600
- - name: before
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- ending point of the data to be returned, or a relative number of
- seconds, to the last collected timestamp. Netdata will assume it is
- a relative number if it is smaller than the duration of the round
- robin database for this chart. So, if the round robin database is
- 3600 seconds, any value from -3600 to 3600 will trigger relative
- arithmetics. Netdata will adapt this parameter to the boundaries of
- the round robin database.
- required: false
- schema:
- type: number
- format: integer
- default: 0
- - name: group
- in: query
- description: The grouping method. If multiple collected values are to be grouped
- in order to return fewer points, this parameters defines the method
- of grouping. methods are supported "min", "max", "average", "sum",
- "incremental-sum". "max" is actually calculated on the absolute
- value collected (so it works for both positive and negative
- dimensions to return the most extreme value in either direction).
- required: true
- allowEmptyValue: false
- schema:
- type: string
- enum:
- - min
- - max
- - average
- - median
- - stddev
- - sum
- - incremental-sum
- - ses
- - des
- - cv
- - countif
- - percentile
- - percentile25
- - percentile50
- - percentile75
- - percentile80
- - percentile90
- - percentile95
- - percentile97
- - percentile98
- - percentile99
- - trimmed-mean
- - trimmed-mean1
- - trimmed-mean2
- - trimmed-mean3
- - trimmed-mean5
- - trimmed-mean10
- - trimmed-mean15
- - trimmed-mean20
- - trimmed-mean25
- - trimmed-median
- - trimmed-median1
- - trimmed-median2
- - trimmed-median3
- - trimmed-median5
- - trimmed-median10
- - trimmed-median15
- - trimmed-median20
- - trimmed-median25
- default: average
- - name: options
- in: query
- description: Options that affect data generation.
- required: false
- allowEmptyValue: true
- schema:
- type: array
- items:
- type: string
- enum:
- - abs
- - absolute
- - display-absolute
- - absolute-sum
- - null2zero
- - percentage
- - unaligned
- - anomaly-bit
- default:
- - absolute
- name: label
in: query
description: A text to be used as the label.
@@ -673,9 +566,8 @@ paths:
format: any text
- name: label_color
in: query
- description: "A color to be used for the background of the label side(left side) of the badge.
- One of predefined colors or specific color in hex `RGB` or `RRGGBB` format (without preceding `#` character).
- If value wrong or not given default color will be used."
+ description: |
+ A color to be used for the background of the label *(left)* side of the badge. One of the predefined colors, or a specific color in hex `RGB` or `RRGGBB` format (without the preceding `#` character). If the value is wrong or not given, the default color will be used.
required: false
allowEmptyValue: true
schema:
@@ -697,12 +589,8 @@ paths:
format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
- name: value_color
in: query
- description: "A color to be used for the background of the value *(right)* part of badge. You can set
- multiple using a pipe with a condition each, like this:
- `color<value|color:null` The following operators are
- supported: >, <, >=, <=, =, :null (to check if no value exists).
- Each color can be specified in same manner as for `label_color` parameter.
- Currently only integers are supported as values."
+ description: |
+ A color to be used for the background of the value *(right)* part of the badge. You can set multiple colors using a pipe, with a condition for each, like this: `color<value|color:null`. The following operators are supported: >, <, >=, <=, =, :null (to check if no value exists). Each color can be specified in the same manner as for the `label_color` parameter. Currently only integers are supported as values (see the example request below).
required: false
allowEmptyValue: true
schema:
@@ -710,9 +598,8 @@ paths:
format: any text
- name: text_color_lbl
in: query
- description: "Font color for label *(left)* part of the badge. One of predefined colors or as HTML hexadecimal
- color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default
- color will be used."
+ description: |
+ Font color for the label *(left)* part of the badge. One of the predefined colors, or an HTML hexadecimal color without the preceding `#` character. Allowed formats are `RGB` and `RRGGBB`. If no value or a wrong value is given, the default color will be used.
required: false
allowEmptyValue: true
schema:
@@ -734,9 +621,8 @@ paths:
format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
- name: text_color_val
in: query
- description: "Font color for value *(right)* part of the badge. One of predefined colors or as HTML
- hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value
- given default color will be used."
+ description: |
+ Font color for the value *(right)* part of the badge. One of the predefined colors, or an HTML hexadecimal color without the preceding `#` character. Allowed formats are `RGB` and `RRGGBB`. If no value or a wrong value is given, the default color will be used.
required: false
allowEmptyValue: true
schema:
@@ -784,12 +670,8 @@ paths:
format: integer
- name: fixed_width_lbl
in: query
- description: "This parameter overrides auto-sizing of badge and creates it with fixed width.
- This parameter determines the size of the label's left side *(label/name)*.
- You must set this parameter together with `fixed_width_val` otherwise it will be ignored.
- You should set the label/value widths wide enough to provide space for all the possible values/contents of
- the badge you're requesting. In case the text cannot fit the space given it will be clipped.
- The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`."
+ description: |
+ This parameter overrides auto-sizing of the badge and creates it with a fixed width. It determines the size of the badge's left side *(label/name)*. You must set this parameter together with `fixed_width_val`, otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. If the text cannot fit in the given space, it will be clipped. The `scale` parameter still applies to the values you give to `fixed_width_lbl` and `fixed_width_val`.
required: false
allowEmptyValue: false
schema:
@@ -797,12 +679,8 @@ paths:
format: integer
- name: fixed_width_val
in: query
- description: "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter
- determines the size of the label's right side *(value)*. You must set this parameter together with
- `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to
- provide space for all the possible values/contents of the badge you're requesting. In case the text cannot
- fit the space given it will be clipped. The `scale` parameter still applies on the values you give to
- `fixed_width_lbl` and `fixed_width_val`."
+ description: |
+ This parameter overrides auto-sizing of the badge and creates it with a fixed width. It determines the size of the badge's right side *(value)*. You must set this parameter together with `fixed_width_lbl`, otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. If the text cannot fit in the given space, it will be clipped. The `scale` parameter still applies to the values you give to `fixed_width_lbl` and `fixed_width_val`.
required: false
allowEmptyValue: false
schema:
@@ -818,146 +696,171 @@ paths:
"500":
description: Internal server error. This usually means the server is out of
memory.
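As referenced in the `value_color` description above, a conditional-color badge request could look like the line below (thresholds hypothetical; conditions are assumed to be evaluated left to right, and the special characters would normally be URL-encoded - they are shown raw here for readability):

    /api/v1/badge.svg?chart=system.cpu&after=-60&label=cpu%20used&value_color=green<75|orange<90|red:null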
- /allmetrics:
+ /api/v2/weights:
get:
- summary: Get a value of all the metrics maintained by netdata
- description: The allmetrics endpoint returns the latest value of all charts and
- dimensions stored in the netdata server.
+ operationId: weights2
+ tags:
+ - weights
+ summary: Score or weight all or some of the metrics, across all nodes, according to various algorithms.
+ description: |
+ This endpoint goes through all metrics and scores them according to an algorithm.
parameters:
- - name: format
+ - $ref: '#/components/parameters/weightMethods'
+ - $ref: '#/components/parameters/scopeNodes'
+ - $ref: '#/components/parameters/scopeContexts'
+ - $ref: '#/components/parameters/filterNodes'
+ - $ref: '#/components/parameters/filterContexts'
+ - $ref: '#/components/parameters/filterInstances'
+ - $ref: '#/components/parameters/filterLabels'
+ - $ref: '#/components/parameters/filterAlerts'
+ - $ref: '#/components/parameters/filterDimensions'
+ - $ref: '#/components/parameters/baselineAfter'
+ - $ref: '#/components/parameters/baselineBefore'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
+ - $ref: '#/components/parameters/tier'
+ - $ref: '#/components/parameters/points'
+ - $ref: '#/components/parameters/timeoutMS'
+ - $ref: '#/components/parameters/dataQueryOptions'
+ - $ref: '#/components/parameters/dataTimeGroup2'
+ - $ref: '#/components/parameters/dataTimeGroupOptions2'
+ responses:
+ "200":
+ description: JSON object with weights for each context, chart and dimension.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/weights2"
+ "400":
+ description: The given parameters are invalid.
+ "403":
+ description: Metrics correlations are not enabled on this Netdata Agent.
+ "404":
+ description: |
+ No charts could be found, or the method that correlated the metrics did not produce any result.
+ "504":
+ description: Timeout - the query took too long and has been cancelled.
+ /api/v1/weights:
+ get:
+ operationId: weights1
+ tags:
+ - weights
+ summary: Score or weight all or some of the metrics of a single node, according to various algorithms.
+ description: |
+ This endpoint goes through all metrics and scores them according to an algorithm.
+ parameters:
+ - $ref: '#/components/parameters/weightMethods'
+ - $ref: '#/components/parameters/context'
+ - $ref: '#/components/parameters/baselineAfter'
+ - $ref: '#/components/parameters/baselineBefore'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
+ - $ref: '#/components/parameters/tier'
+ - $ref: '#/components/parameters/points'
+ - $ref: '#/components/parameters/timeoutMS'
+ - $ref: '#/components/parameters/dataQueryOptions'
+ - $ref: '#/components/parameters/dataTimeGroup1'
+ - $ref: '#/components/parameters/dataTimeGroupOptions1'
+ responses:
+ "200":
+ description: JSON object with weights for each context, chart and dimension.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/weights"
+ "400":
+ description: The given parameters are invalid.
+ "403":
+ description: Metrics correlations are not enabled on this Netdata Agent.
+ "404":
+ description: No charts could be found, or the method
+ that correlated the metrics did not produce any result.
+ "504":
+ description: Timeout - the query took too long and has been cancelled.
+ /api/v1/metric_correlations:
+ get:
+ operationId: metricCorrelations1
+ tags:
+ - weights
+ summary: Analyze all the metrics to find their correlations - EOL
+ description: |
+ THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint. Given two time-windows (baseline, highlight), it goes through all the available metrics, querying both windows, and tries to find how these two windows relate to each other. It supports multiple algorithms to do so. The result is a list of all metrics evaluated, weighted from 0.0 (the two windows are very different) to 1.0 (the two windows are similar). The algorithm automatically adjusts the baseline window to be a power-of-two multiple of the highlighted one (1, 2, 4, 8, etc.).
+ parameters:
+ - $ref: '#/components/parameters/weightMethods'
+ - $ref: '#/components/parameters/baselineAfter'
+ - $ref: '#/components/parameters/baselineBefore'
+ - $ref: '#/components/parameters/after'
+ - $ref: '#/components/parameters/before'
+ - $ref: '#/components/parameters/points'
+ - $ref: '#/components/parameters/tier'
+ - $ref: '#/components/parameters/timeoutMS'
+ - $ref: '#/components/parameters/dataQueryOptions'
+ - $ref: '#/components/parameters/dataTimeGroup1'
+ - $ref: '#/components/parameters/dataTimeGroupOptions1'
+ responses:
+ "200":
+ description: JSON object with weights for each chart and dimension.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/metric_correlations"
+ "400":
+ description: The given parameters are invalid.
+ "403":
+ description: Metrics correlations are not enabled on this Netdata Agent.
+ "404":
+ description: No charts could be found, or the method
+ that correlated the metrics did not produce any result.
+ "504":
+ description: Timeout - the query took too long and has been cancelled.
+ /api/v1/function:
+ get:
+ operationId: function1
+ tags:
+ - functions
+ description: "Execute a collector function."
+ parameters:
+ - name: function
in: query
- description: The format of the response to be returned.
+ description: The name of the function, as returned by the collector.
required: true
+ allowEmptyValue: false
schema:
type: string
- enum:
- - shell
- - prometheus
- - prometheus_all_hosts
- - json
- default: shell
- - name: filter
- in: query
- description: Allows to filter charts out using simple patterns.
- required: false
- schema:
- type: string
- format: any text
- - name: variables
- in: query
- description: When enabled, netdata will expose various system
- configuration metrics.
- required: false
- schema:
- type: string
- enum:
- - yes
- - no
- default: no
- - name: help
- in: query
- description: Enable or disable HELP lines in prometheus output.
- required: false
- schema:
- type: string
- enum:
- - yes
- - no
- default: no
- - name: types
- in: query
- description: Enable or disable TYPE lines in prometheus output.
- required: false
- schema:
- type: string
- enum:
- - yes
- - no
- default: no
- - name: timestamps
- in: query
- description: Enable or disable timestamps in prometheus output.
- required: false
- schema:
- type: string
- enum:
- - yes
- - no
- default: yes
- - name: names
- in: query
- description: When enabled netdata will report dimension names. When disabled
- netdata will report dimension IDs. The default is controlled in
- netdata.conf.
- required: false
- schema:
- type: string
- enum:
- - yes
- - no
- default: yes
- - name: oldunits
- in: query
- description: When enabled, netdata will show metric names for the default
- source=average as they appeared before 1.12, by using the legacy
- unit naming conventions.
- required: false
- schema:
- type: string
- enum:
- - yes
- - no
- default: yes
- - name: hideunits
- in: query
- description: When enabled, netdata will not include the units in the metric
- names, for the default source=average.
- required: false
- schema:
- type: string
- enum:
- - yes
- - no
- default: yes
- - name: server
- in: query
- description: Set a distinct name of the client querying prometheus metrics.
- Netdata will use the client IP if this is not set.
- required: false
- schema:
- type: string
- format: any text
- - name: prefix
- in: query
- description: Prefix all prometheus metrics with this string.
- required: false
- schema:
- type: string
- format: any text
- - name: data
- in: query
- description: Select the prometheus response data source. There is a setting in
- netdata.conf for the default.
- required: false
- schema:
- type: string
- enum:
- - as-collected
- - average
- - sum
- default: average
+ - $ref: '#/components/parameters/timeoutSecs'
responses:
"200":
- description: All the metrics returned in the format requested.
+ description: The collector function has been executed successfully. Each collector may return a different type of content.
"400":
- description: The format requested is not supported.
- /alarms:
+ description: The request was rejected by the collector.
+ "404":
+ description: The requested function is not found.
+ "500":
+ description: Other internal error; getting this error means there is a bug in Netdata.
+ "503":
+ description: The collector to execute the function is not currently available.
+ "504":
+ description: Timeout while waiting for the collector to execute the function.
+ "591":
+ description: The collector sent a response, but it was invalid or corrupted.
+ /api/v1/functions:
+ get:
+ operationId: functions1
+ tags:
+ - functions
+ summary: Get a list of all registered collector functions.
+ description: Collector functions are programs that can be executed on demand.
+ responses:
+ "200":
+ description: A JSON object containing one object per supported function.
+ /api/v1/alarms:
get:
+ operationId: alerts1
+ tags:
+ - alerts
summary: Get a list of active or raised alarms on the server
- description: The alarms endpoint returns the list of all raised or enabled alarms on
- the netdata server. Called without any parameters, the raised alarms in
- state WARNING or CRITICAL are returned. By passing "?all", all the
- enabled alarms are returned.
+ description: |
+ The alarms endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing "?all", all the enabled alarms are returned.
parameters:
- name: all
in: query
@@ -980,15 +883,14 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/alarms"
- /alarms_values:
+ /api/v1/alarms_values:
get:
+ operationId: alertValues1
+ tags:
+ - alerts
summary: Get a list of active or raised alarms on the server
- description: "The alarms_values endpoint returns the list of all raised or enabled alarms on
- the netdata server. Called without any parameters, the raised alarms in
- state WARNING or CRITICAL are returned. By passing '?all', all the
- enabled alarms are returned.
- This option output differs from `/alarms` in the number of variables delivered. This endpoint gives
- to user `id`, `value`, `last_updated` time, and alarm `status`."
+ description: |
+        The alarms_values endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing '?all', all the enabled alarms are returned. This output differs from `/alarms` in the number of variables delivered. This endpoint gives the user the `id`, `value`, `last_updated` time, and alarm `status`.
parameters:
- name: all
in: query
@@ -1011,19 +913,19 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/alarms_values"
- /alarm_log:
+ /api/v1/alarm_log:
get:
+ operationId: alertsLog1
+ tags:
+ - alerts
summary: Retrieves the entries of the alarm log
- description: Returns an array of alarm_log entries, with historical information on
- raised and cleared alarms.
+ description: |
+ Returns an array of alarm_log entries, with historical information on raised and cleared alarms.
parameters:
- name: after
in: query
- description: Passing the parameter after=UNIQUEID returns all the events in the
- alarm log that occurred after UNIQUEID. An automated series of calls
- would call the interface once without after=, store the last
- UNIQUEID of the returned set, and give it back to get incrementally
- the next events.
+ description: |
+        Passing the parameter after=UNIQUEID returns all the events in the alarm log that occurred after UNIQUEID. An automated series of calls would call the interface once without after=, store the last UNIQUEID of the returned set, and pass it back to incrementally get the next events.
required: false
schema:
type: integer
@@ -1036,25 +938,18 @@ paths:
type: array
items:
$ref: "#/components/schemas/alarm_log_entry"
- /alarm_count:
+ /api/v1/alarm_count:
get:
+ operationId: alertsCount1
+ tags:
+ - alerts
summary: Get an overall status of the chart
- description: Checks multiple charts with the same context and counts number of alarms
- with given status.
+ description: |
+        Checks multiple charts with the same context and counts the number of alarms with the given status.
parameters:
- - in: query
- name: context
- description: Specify context which should be checked.
- required: false
- allowEmptyValue: true
- schema:
- type: array
- items:
- type: string
- default:
- - system.cpu
- - in: query
- name: status
+ - $ref: '#/components/parameters/context'
+ - name: status
+ in: query
description: Specify alarm status to count.
required: false
allowEmptyValue: true
@@ -1082,26 +977,52 @@ paths:
"500":
description: Internal server error. This usually means the server is out of
memory.
- /manage/health:
+ /api/v1/alarm_variables:
get:
- summary: "Accesses the health management API to control health checks and
- notifications at runtime."
- description: "Available from Netdata v1.12 and above, protected via bearer
- authorization. Especially useful for maintenance periods, the API allows
- you to disable health checks completely, silence alarm notifications, or
- Disable/Silence specific alarms that match selectors on alarm/template
- name, chart, context, host and family. For the simple disable/silence
- all scenarios, only the cmd parameter is required. The other parameters
- are used to define alarm selectors. For more information and examples,
- refer to the netdata documentation."
+ operationId: getNodeAlertVariables1
+ tags:
+ - alerts
+ summary: List variables available to configure alarms for a chart
+ description: |
+ Returns the basic information of a chart and all the variables that can be used in alarm and template health configurations for the particular chart or family.
+ parameters:
+ - name: chart
+ in: query
+ description: The id of the chart as returned by the /charts call.
+ required: true
+ schema:
+ type: string
+ format: as returned by /charts
+ default: system.cpu
+ responses:
+ "200":
+          description: A JSON object with information about the chart and the
+            available variables.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/alarm_variables"
+ "400":
+ description: Bad request - the body will include a message stating what is wrong.
+ "404":
+ description: No chart with the given id is found.
+ "500":
+ description: Internal server error. This usually means the server is out of
+ memory.
+ /api/v1/manage/health:
+ get:
+ operationId: health1
+ tags:
+ - management
+ summary: |
+ Accesses the health management API to control health checks and notifications at runtime.
+ description: |
+        Available from Netdata v1.12 and above, protected via bearer authorization. Especially useful for maintenance periods, the API allows you to disable health checks completely, silence alarm notifications, or disable/silence specific alarms that match selectors on alarm/template name, chart, context, host and family. For the simple disable/silence all scenarios, only the cmd parameter is required. The other parameters are used to define alarm selectors. For more information and examples, refer to the netdata documentation.
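+      # Illustrative only, not part of the spec: a typical maintenance-window call,
+      # assuming a local agent and a bearer token configured as described in the
+      # netdata documentation:
+      #   curl "http://localhost:19999/api/v1/manage/health?cmd=SILENCE%20ALL" \
+      #        -H "Authorization: Bearer <token>"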
parameters:
- name: cmd
in: query
- description: "DISABLE ALL: No alarm criteria are evaluated, nothing is written in
- the alarm log. SILENCE ALL: No notifications are sent. RESET: Return
- to the default state. DISABLE/SILENCE: Set the mode to be used for
- the alarms matching the criteria of the alarm selectors. LIST: Show
- active configuration."
+ description: |
+ DISABLE ALL: No alarm criteria are evaluated, nothing is written in the alarm log. SILENCE ALL: No notifications are sent. RESET: Return to the default state. DISABLE/SILENCE: Set the mode to be used for the alarms matching the criteria of the alarm selectors. LIST: Show active configuration.
required: false
schema:
type: string
@@ -1144,11 +1065,14 @@ paths:
description: A plain text response based on the result of the command.
"403":
description: Bearer authentication error.
- /aclk:
+ /api/v1/aclk:
get:
+ operationId: aclk1
+ tags:
+ - management
summary: Get information about current ACLK state
- description: "ACLK endpoint returns detailed information
- about current state of ACLK (Agent to Cloud communication)."
+ description: |
+        The ACLK endpoint returns detailed information about the current state of ACLK (Agent to Cloud communication).
responses:
"200":
description: JSON object with ACLK information.
@@ -1156,448 +1080,532 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/aclk_state"
- /metric_correlations:
- get:
- summary: "Analyze all the metrics to find their correlations"
- description: "THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint.
- Given two time-windows (baseline, highlight), it goes
- through all the available metrics, querying both windows and tries to find
- how these two windows relate to each other. It supports
- multiple algorithms to do so. The result is a list of all
- metrics evaluated, weighted for 0.0 (the two windows are
- more different) to 1.0 (the two windows are similar).
- The algorithm adjusts automatically the baseline window to be
- a power of two multiple of the highlighted (1, 2, 4, 8, etc)."
- parameters:
- - name: baseline_after
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- starting point of baseline window, or a relative number of
- seconds (negative, relative to parameter baseline_before). Netdata will
- assume it is a relative number if it is less that 3 years (in seconds).
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: -300
- - name: baseline_before
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- ending point of the baseline window, or a relative number of
- seconds (negative), relative to the last collected timestamp.
- Netdata will assume it is a relative number if it is less than 3
- years (in seconds).
- required: false
- schema:
- type: number
- format: integer
- default: -60
- - name: after
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- starting point of highlighted window, or a relative number of
- seconds (negative, relative to parameter highlight_before). Netdata will
- assume it is a relative number if it is less that 3 years (in seconds).
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: -60
- - name: before
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- ending point of the highlighted window, or a relative number of
- seconds (negative), relative to the last collected timestamp.
- Netdata will assume it is a relative number if it is less than 3
- years (in seconds).
- required: false
- schema:
- type: number
- format: integer
- default: 0
- - name: points
- in: query
- description: The number of points to be evaluated for the highlighted window.
- The baseline window will be adjusted automatically to receive a proportional
- amount of points.
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: 500
- - name: method
- in: query
- description: the algorithm to run
- required: false
- schema:
- type: string
- enum:
- - ks2
- - volume
- default: ks2
- - name: timeout
- in: query
- description: Cancel the query if to takes more that this amount of milliseconds.
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: 60000
- - name: options
- in: query
- description: Options that affect data generation.
- required: false
- allowEmptyValue: false
- schema:
- type: array
- items:
- type: string
- enum:
- - min2max
- - abs
- - absolute
- - absolute-sum
- - null2zero
- - percentage
- - unaligned
- - allow_past
- - nonzero
- - anomaly-bit
- - raw
- default:
- - null2zero
- - allow_past
- - nonzero
- - unaligned
- - name: group
- in: query
- description: The grouping method. If multiple collected values are to be grouped
- in order to return fewer points, this parameters defines the method
- of grouping. methods supported "min", "max", "average", "sum",
- "incremental-sum". "max" is actually calculated on the absolute
- value collected (so it works for both positive and negative
- dimensions to return the most extreme value in either direction).
- required: true
- allowEmptyValue: false
- schema:
- type: string
- enum:
- - min
- - max
- - average
- - median
- - stddev
- - sum
- - incremental-sum
- - ses
- - des
- - cv
- - countif
- - percentile
- - percentile25
- - percentile50
- - percentile75
- - percentile80
- - percentile90
- - percentile95
- - percentile97
- - percentile98
- - percentile99
- - trimmed-mean
- - trimmed-mean1
- - trimmed-mean2
- - trimmed-mean3
- - trimmed-mean5
- - trimmed-mean10
- - trimmed-mean15
- - trimmed-mean20
- - trimmed-mean25
- - trimmed-median
- - trimmed-median1
- - trimmed-median2
- - trimmed-median3
- - trimmed-median5
- - trimmed-median10
- - trimmed-median15
- - trimmed-median20
- - trimmed-median25
- default: average
- - name: group_options
- in: query
- description: When the group function supports additional parameters, this field
- can be used to pass them to it. Currently only "countif" supports this.
- required: false
- allowEmptyValue: false
- schema:
- type: string
- responses:
- "200":
- description: JSON object with weights for each chart and dimension.
- content:
- application/json:
- schema:
- $ref: "#/components/schemas/metric_correlations"
- "400":
- description: The given parameters are invalid.
- "403":
- description: metrics correlations are not enabled on this Netdata Agent.
- "404":
- description: No charts could be found, or the method
- that correlated the metrics did not produce any result.
- "504":
- description: Timeout - the query took too long and has been cancelled.
- /function:
- get:
- summary: "Execute a collector function."
- parameters:
- - name: function
- in: query
- description: The name of the function, as returned by the collector.
- required: true
- allowEmptyValue: false
- schema:
- type: string
- - name: timeout
- in: query
- description: The timeout in seconds to wait for the function to complete.
- required: false
- schema:
- type: number
- format: integer
- default: 10
- responses:
- "200":
- description: The collector function has been executed successfully. Each collector may return a different type of content.
- "400":
- description: The request was rejected by the collector.
- "404":
- description: The requested function is not found.
- "500":
- description: Other internal error, getting this error means there is a bug in Netdata.
- "503":
- description: The collector to execute the function is not currently available.
- "504":
- description: Timeout while waiting for the collector to execute the function.
- "591":
- description: The collector sent a response, but it was invalid or corrupted.
- /functions:
- get:
- summary: Get a list of all registered collector functions.
- description: Collector functions are programs that can be executed on demand.
- responses:
- "200":
- description: A JSON object containing one object per supported function.
- /weights:
- get:
- summary: "Analyze all the metrics using an algorithm and score them accordingly"
- description: "This endpoint goes through all metrics and scores them according to an algorithm."
- parameters:
- - name: baseline_after
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- starting point of baseline window, or a relative number of
- seconds (negative, relative to parameter baseline_before). Netdata will
- assume it is a relative number if it is less that 3 years (in seconds).
- This parameter is used in KS2 and VOLUME algorithms.
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: -300
- - name: baseline_before
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- ending point of the baseline window, or a relative number of
- seconds (negative), relative to the last collected timestamp.
- Netdata will assume it is a relative number if it is less than 3
- years (in seconds).
- This parameter is used in KS2 and VOLUME algorithms.
- required: false
- schema:
- type: number
- format: integer
- default: -60
- - name: after
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- starting point of highlighted window, or a relative number of
- seconds (negative, relative to parameter highlight_before). Netdata will
- assume it is a relative number if it is less that 3 years (in seconds).
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: -60
- - name: before
- in: query
- description: This parameter can either be an absolute timestamp specifying the
- ending point of the highlighted window, or a relative number of
- seconds (negative), relative to the last collected timestamp.
- Netdata will assume it is a relative number if it is less than 3
- years (in seconds).
- required: false
- schema:
- type: number
- format: integer
- default: 0
- - name: context
- in: query
- description: A simple pattern matching the contexts to evaluate.
- required: false
- allowEmptyValue: false
- schema:
- type: string
- - name: points
- in: query
- description: The number of points to be evaluated for the highlighted window.
- The baseline window will be adjusted automatically to receive a proportional
- amount of points.
- This parameter is only used by the KS2 algorithm.
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: 500
- - name: method
- in: query
- description: the algorithm to run
- required: false
- schema:
- type: string
- enum:
- - ks2
- - volume
- - anomaly-rate
- default: anomaly-rate
- - name: tier
- in: query
- description: Use the specified database tier
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- - name: timeout
- in: query
- description: Cancel the query if to takes more that this amount of milliseconds.
- required: false
- allowEmptyValue: false
- schema:
- type: number
- format: integer
- default: 60000
- - name: options
- in: query
- description: Options that affect data generation.
- required: false
- allowEmptyValue: false
- schema:
- type: array
- items:
- type: string
- enum:
- - min2max
- - abs
- - absolute
- - absolute-sum
- - null2zero
- - percentage
- - unaligned
- - nonzero
- - anomaly-bit
- - raw
- default:
- - null2zero
- - nonzero
- - unaligned
- - name: group
- in: query
- description: The grouping method. If multiple collected values are to be grouped
- in order to return fewer points, this parameters defines the method
- of grouping. methods supported "min", "max", "average", "sum",
- "incremental-sum". "max" is actually calculated on the absolute
- value collected (so it works for both positive and negative
- dimensions to return the most extreme value in either direction).
- required: true
- allowEmptyValue: false
- schema:
- type: string
- enum:
- - min
- - max
- - average
- - median
- - stddev
- - sum
- - incremental-sum
- - ses
- - des
- - cv
- - countif
- - percentile
- - percentile25
- - percentile50
- - percentile75
- - percentile80
- - percentile90
- - percentile95
- - percentile97
- - percentile98
- - percentile99
- - trimmed-mean
- - trimmed-mean1
- - trimmed-mean2
- - trimmed-mean3
- - trimmed-mean5
- - trimmed-mean10
- - trimmed-mean15
- - trimmed-mean20
- - trimmed-mean25
- - trimmed-median
- - trimmed-median1
- - trimmed-median2
- - trimmed-median3
- - trimmed-median5
- - trimmed-median10
- - trimmed-median15
- - trimmed-median20
- - trimmed-median25
- default: average
- - name: group_options
- in: query
- description: When the group function supports additional parameters, this field
- can be used to pass them to it. Currently only "countif" supports this.
- required: false
- allowEmptyValue: false
- schema:
- type: string
- responses:
- "200":
- description: JSON object with weights for each context, chart and dimension.
- content:
- application/json:
- schema:
- $ref: "#/components/schemas/weights"
- "400":
- description: The given parameters are invalid.
- "403":
- description: metrics correlations are not enabled on this Netdata Agent.
- "404":
- description: No charts could be found, or the method
- that correlated the metrics did not produce any result.
- "504":
- description: Timeout - the query took too long and has been cancelled.
-servers:
- - url: https://registry.my-netdata.io/api/v1
- - url: http://registry.my-netdata.io/api/v1
components:
+ parameters:
+ scopeNodes:
+ name: scope_nodes
+ in: query
+ description: |
+        A simple pattern limiting the nodes scope of the query. The scope controls both data and metadata response. The simple pattern is checked against the nodes' machine guid, node id and hostname. The default nodes scope is all nodes for which this agent has data. Usually the nodes scope is used to slice the entire dashboard (e.g. the Global Nodes Selector at the Netdata Cloud overview dashboard). Both positive and negative simple pattern expressions are supported.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+ scopeContexts:
+ name: scope_contexts
+ in: query
+ description: |
+        A simple pattern limiting the contexts scope of the query. The scope controls both data and metadata response. The default contexts scope is all contexts for which this agent has data. Usually the contexts scope is used to slice data on the dashboard (e.g. each context based chart has its own contexts scope, limiting the chart to all the instances of the selected context). Both positive and negative simple pattern expressions are supported.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+ filterNodes:
+ name: nodes
+ in: query
+ description: |
+ A simple pattern matching the nodes to be queried. This only controls the data response, not the metadata. The simple pattern is checked against the nodes' machine guid, node id, hostname. The default nodes selector is all the nodes matched by the nodes scope. Both positive and negative simple pattern expressions are supported.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+ filterContexts:
+ name: contexts
+ in: query
+ description: |
+ A simple pattern matching the contexts to be queried. This only controls the data response, not the metadata. Both positive and negative simple pattern expressions are supported.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+ filterInstances:
+ name: instances
+ in: query
+ description: |
+ A simple pattern matching the instances to be queried. The simple pattern is checked against the instance `id`, the instance `name`, the fully qualified name of the instance `id` and `name`, like `instance@machine_guid`, where `instance` is either its `id` or `name`. Both positive and negative simple pattern expressions are supported.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+ filterLabels:
+ name: labels
+ in: query
+ description: |
+ A simple pattern matching the labels to be queried. The simple pattern is checked against `name:value` of all the labels of all the eligible instances (as filtered by all the above: scope nodes, scope contexts, nodes, contexts and instances). Negative simple patterns should not be used in this filter.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+ filterAlerts:
+ name: alerts
+ in: query
+ description: |
+        A simple pattern matching the alerts to be queried. The simple pattern is checked against the `name` of alerts and the combination of `name:status`, when status is one of `CLEAR`, `WARNING`, `CRITICAL`, `REMOVED`, `UNDEFINED`, `UNINITIALIZED`, of all the alerts of all the eligible instances (as filtered by all the above). A negative simple pattern will exclude the instances having matching alerts.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+ filterDimensions:
+ name: dimensions
+ in: query
+ description: |
+        A simple pattern matching the dimensions to be queried. The simple pattern is checked against the `id` and the `name` of the dimensions of the eligible instances (as filtered by all the above). Both positive and negative simple pattern expressions are supported.
+ required: false
+ schema:
+ type: string
+ format: simple pattern
+ default: "*"
+
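+    # Illustrative only, not part of the spec: scope and filter parameters combine in a
+    # single query. A hypothetical /api/v2/data request that scopes the query to CPU
+    # contexts, keeps only nodes named web*, and selects two dimensions:
+    #   /api/v2/data?scope_contexts=system.cpu&nodes=web*&dimensions=user|system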
+ dataFormat1:
+ name: format
+ in: query
+ description: The format of the data to be returned.
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - json
+ - jsonp
+ - csv
+ - tsv
+ - tsv-excel
+ - ssv
+ - ssvcomma
+ - datatable
+ - datasource
+ - html
+ - markdown
+ - array
+ - csvjsonarray
+ default: json
+ dataFormat2:
+ name: format
+ in: query
+ description: The format of the data to be returned.
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - json
+ - json2
+ - jsonp
+ - csv
+ - tsv
+ - tsv-excel
+ - ssv
+ - ssvcomma
+ - datatable
+ - datasource
+ - html
+ - markdown
+ - array
+ - csvjsonarray
+ default: json2
+ dataQueryOptions:
+ name: options
+ in: query
+ description: |
+ Options that affect data generation.
+ * `jsonwrap` - Wrap the output in a JSON object with metadata about the query.
+ * `raw` - change the output so that it is aggregatable across multiple such queries. Supported by `/api/v2` data queries and `json2` format.
+ * `minify` - Remove unnecessary spaces and newlines from the output.
+ * `debug` - Provide additional information in `jsonwrap` output to help tracing issues.
+        * `nonzero` - Do not return dimensions whose values are all zero, to improve the visual appearance of charts. They will still be returned if all the dimensions are entirely zero.
+ * `null2zero` - Replace `null` values with `0`.
+        * `absolute` or `abs` - Traditionally Netdata returns some dimensions as negative values to improve the visual appearance of charts. This option turns this feature off, returning all values as positive (absolute) numbers.
+ * `display-absolute` - Only used by badges, to do color calculation using the signed value, but render the value without a sign.
+ * `flip` or `reversed` - Order the timestamps array in reverse order (newest to oldest).
+ * `min2max` - When flattening multi-dimensional data into a single metric format, use `max - min` instead of `sum`. This is EOL - use `/api/v2` to control aggregation across dimensions.
+ * `percentage` - Convert all values into a percentage vs the row total. When enabled, Netdata will query all dimensions, even the ones that have not been selected or are hidden, to find the row total, in order to calculate the percentage of each dimension selected.
+ * `seconds` - Output timestamps in seconds instead of dates.
+ * `milliseconds` or `ms` - Output timestamps in milliseconds instead of dates.
+        * `unaligned` - By default queries are aligned to the view, so that as time passes the data returned for past time-frames do not change. When a data query will not be used for visualization, `unaligned` can be given to avoid aligning the query time-frame for visual precision.
+        * `match-ids`, `match-names` - By default filters match both IDs and names when they are available. Setting either of the two options will disable the other.
+ * `anomaly-bit` - query the anomaly information instead of metric values. This is EOL, use `/api/v2` and `json2` format which always returns this information and many more.
+        * `jw-anomaly-rates` - return anomaly rates as a separate result set in the same `json` format response. This is EOL, use `/api/v2` and `json2` format which always returns this information and many more.
+ * `details` - `/api/v2/data` returns in `jsonwrap` the full tree of dimensions that have been matched by the query.
+ * `group-by-labels` - `/api/v2/data` returns in `jsonwrap` flattened labels per output dimension. These are used to identify the instances that have been aggregated into each dimension, making it possible to provide a map, like Netdata does for Kubernetes.
+ * `natural-points` - return timestamps as found in the database. The result is again fixed-step, but the query engine attempts to align them with the timestamps found in the database.
+        * `virtual-points` - return timestamps independent of the database alignment. This is needed when aggregating data across multiple Netdata agents, to ensure that their outputs do not need to be interpolated to be merged.
+ * `selected-tier` - use data exclusively from the selected tier given with the `tier` parameter. This option is set automatically when the `tier` parameter is set.
+ * `all-dimensions` - In `/api/v1` `jsonwrap` include metadata for all candidate metrics examined. In `/api/v2` this is standard behavior and no option is needed.
+ * `label-quotes` - In `csv` output format, enclose each header label in quotes.
+        * `objectrows` - Each row of values should be an object, not an array (only for `json` format).
+ * `google_json` - Comply with google JSON/JSONP specs (only for `json` format).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - jsonwrap
+ - raw
+ - minify
+ - debug
+ - nonzero
+ - null2zero
+ - abs
+ - absolute
+ - display-absolute
+ - flip
+ - reversed
+ - min2max
+ - percentage
+ - seconds
+ - ms
+ - milliseconds
+ - unaligned
+ - match-ids
+ - match-names
+ - anomaly-bit
+ - jw-anomaly-rates
+ - details
+ - group-by-labels
+ - natural-points
+ - virtual-points
+ - selected-tier
+ - all-dimensions
+ - label-quotes
+ - objectrows
+ - google_json
+ default:
+ - seconds
+ - jsonwrap
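+    # Illustrative only, not part of the spec: options are passed as a single parameter,
+    # separated with pipe or comma. A hypothetical machine-to-machine query that skips
+    # alignment and uses millisecond timestamps:
+    #   /api/v1/data?chart=system.cpu&options=unaligned|ms|jsonwrap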
+ dataTimeGroup1:
+ name: group
+ in: query
+ description: |
+        Time aggregation function. If multiple collected values are to be grouped in order to return fewer points, this parameter defines the method of grouping. If the `absolute` option is set, the values are turned positive before applying this calculation.
+ required: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - avg
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ dataTimeGroup2:
+ name: time_group
+ in: query
+ description: |
+        Time aggregation function. If multiple collected values are to be grouped in order to return fewer points, this parameter defines the method of grouping. If the `absolute` option is set, the values are turned positive before applying this calculation.
+ required: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - avg
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ dataTimeGroupOptions1:
+ name: group_options
+ in: query
+ description: |
+ When the time grouping function supports additional parameters, this field can be used to pass them to it. Currently `countif`, `trimmed-mean`, `trimmed-median` and `percentile` support this. For `countif` the string may start with `<`, `<=`, `<:`, `<>`, `!=`, `>`, `>=`, `>:`. For all others just a number is expected.
+ required: false
+ schema:
+ type: string
+ dataTimeGroupOptions2:
+ name: time_group_options
+ in: query
+ description: |
+ When the time grouping function supports additional parameters, this field can be used to pass them to it. Currently `countif`, `trimmed-mean`, `trimmed-median` and `percentile` support this. For `countif` the string may start with `<`, `<=`, `<:`, `<>`, `!=`, `>`, `>=`, `>:`. For all others just a number is expected.
+ required: false
+ schema:
+ type: string
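+    # Illustrative only, not part of the spec: the time grouping function and its
+    # options work together. Hypothetical examples:
+    #   &group=countif&group_options=>10    count the values greater than 10
+    #   &group=percentile&group_options=95  use the 95th percentile of each group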
+ dataTimeResampling1:
+ name: gtime
+ in: query
+ description: |
+        The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (i.e. when the data is per second, setting gtime=60 will turn it into per minute).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ dataTimeResampling2:
+ name: time_resampling
+ in: query
+ description: |
+        For incremental values that are "per second", this value is used to resample them to "per minute" (60) or "per hour" (3600). It can only be used in conjunction with group=average.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
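+    # Illustrative only, not part of the spec: with group=average and gtime=60 the
+    # per-second values are rescaled to per-minute units, so a steady 5 requests/s
+    # is reported as 300 requests/min (hypothetical chart id):
+    #   /api/v1/data?chart=web_log.requests&group=average&gtime=60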
+ timeoutMS:
+ name: timeout
+ in: query
+ description: |
+ Specify a timeout value in milliseconds after which the agent will abort the query and return a 503 error. A value of 0 indicates no timeout.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ timeoutSecs:
+ name: timeout
+ in: query
+ description: |
+ Specify a timeout value in seconds after which the agent will abort the query and return a 504 error. A value of 0 indicates no timeout, but some endpoints, like `weights`, do not accept infinite timeouts (they have a predefined default), so to disable the timeout it must be set to a really high value.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ before:
+ name: before
+ in: query
+ description: |
+ `after` and `before` define the time-frame of a query. `before` can be a negative number of seconds, up to 3 years (-94608000), relative to current clock. If not set, it is assumed to be the current clock time. When `before` is positive, it is assumed to be a unix epoch timestamp. When non-data endpoints support the `after` and `before`, they use the time-frame to limit their response for objects having data retention within the time-frame given.
+ required: false
+ schema:
+ type: integer
+ default: 0
+ after:
+ name: after
+ in: query
+ description: |
+ `after` and `before` define the time-frame of a query. `after` can be a negative number of seconds, up to 3 years (-94608000), relative to `before`. If not set, it is usually assumed to be -600. When non-data endpoints support the `after` and `before`, they use the time-frame to limit their response for objects having data retention within the time-frame given.
+ required: false
+ schema:
+ type: integer
+ default: -600
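+    # Illustrative only, not part of the spec: relative vs absolute time-frames.
+    #   &after=-3600&before=0                  the last hour, relative to now
+    #   &after=1672531200&before=1672534800    absolute unix epoch timestamps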
+ baselineBefore:
+ name: baseline_before
+ in: query
+ description: |
+ `baseline_after` and `baseline_before` define the baseline time-frame of a comparative query. `baseline_before` can be a negative number of seconds, up to 3 years (-94608000), relative to current clock. If not set, it is assumed to be the current clock time. When `baseline_before` is positive, it is assumed to be a unix epoch timestamp.
+ required: false
+ schema:
+ type: integer
+ default: 0
+ baselineAfter:
+ name: baseline_after
+ in: query
+ description: |
+ `baseline_after` and `baseline_before` define the baseline time-frame of a comparative query. `baseline_after` can be a negative number of seconds, up to 3 years (-94608000), relative to `baseline_before`. If not set, it is usually assumed to be -300.
+ required: false
+ schema:
+ type: integer
+ default: -600
+ points:
+ name: points
+ in: query
+ description: |
+ The number of points to be returned. If not given, or it is <= 0, or it is bigger than the points stored in the database for the given duration, all the available collected values for the given duration will be returned. For `weights` endpoints that do statistical analysis, the `points` define the detail of this analysis (the default is 500).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ tier:
+ name: tier
+ in: query
+ description: |
+        Use only the given dbengine tier for executing the query. Setting this parameter automatically sets the option `selected-tier` for the query.
+ required: false
+ schema:
+ type: number
+ format: integer
+ callback:
+ name: callback
+ in: query
+ description: |
+ For JSONP responses, the callback function name.
+ required: false
+ schema:
+ type: string
+ filename:
+ name: filename
+ in: query
+ description: |
+        Add a `Content-Disposition: attachment; filename=` header to the response, which will instruct the browser to save the response with the given filename.
+ required: false
+ schema:
+ type: string
+ tqx:
+ name: tqx
+ in: query
+ description: |
+ [Google Visualization API](https://developers.google.com/chart/interactive/docs/dev/implementing_data_source?hl=en) formatted parameter.
+ required: false
+ schema:
+ type: string
+ contextOptions1:
+ name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - full
+ - all
+ - charts
+ - dimensions
+ - labels
+ - uuids
+ - queue
+ - flags
+ - deleted
+ - deepscan
+ chart:
+ name: chart
+ in: query
+ description: The id of the chart as returned by the `/api/v1/charts` call.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by `/api/v1/charts`
+ context:
+ name: context
+ in: query
+ description: The context of the chart as returned by the /charts call.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: as returned by /charts
+ dimension:
+ name: dimension
+ in: query
+ description: Zero, one or more dimension ids or names, as returned by the /chart
+ call, separated with comma or pipe. Netdata simple patterns are
+ supported.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ format: as returned by /charts
+ dimensions:
+ name: dimensions
+ in: query
+      description: A simple pattern matching dimensions (use comma or pipe as separator).
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ chart_label_key:
+ name: chart_label_key
+ in: query
+ description: |
+ Specify the chart label keys that need to match for context queries as comma separated values. At least one matching key is needed to match the corresponding chart.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: key1,key2,key3
+ chart_labels_filter:
+ name: chart_labels_filter
+ in: query
+ description: |
+ Specify the chart label keys and values to match for context queries. All keys/values need to match for the chart to be included in the query. The labels are specified as key1:value1,key2:value2
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ format: key1:value1,key2:value2,key3:value3
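+    # Illustrative only, not part of the spec: a hypothetical context query requiring
+    # both label keys to match with the given values:
+    #   /api/v1/data?context=disk.io&chart_labels_filter=device:sda,mount_point:/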
+ weightMethods:
+ name: method
+ in: query
+ description: The weighting / scoring algorithm.
+ required: false
+ schema:
+ type: string
+ enum:
+ - ks2
+ - volume
+ - anomaly-rate
+ - value
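+    # Illustrative only, not part of the spec: a hypothetical /api/v1/weights call
+    # comparing the last minute against the preceding four minutes using ks2:
+    #   /api/v1/weights?method=ks2&after=-60&before=0&baseline_after=-300&baseline_before=-60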
schemas:
info:
type: object
@@ -1857,10 +1865,8 @@ components:
amount of time is kept in the round robin database.
dimensions:
type: object
- description: "An object containing all the chart dimensions available for the
- chart. This is used as an indexed array. For each pair in the
- dictionary: the key is the id of the dimension and the value is a
- dictionary containing the name."
+ description: |
+        An object containing all the chart dimensions available for the chart. This is used as an indexed array. For each pair in the dictionary: the key is the id of the dimension and the value is a dictionary containing the name.
additionalProperties:
type: object
properties:
@@ -1998,7 +2004,612 @@ components:
varname2:
type: number
format: float
- data:
+ jsonwrap2:
+ description: |
+ Data response with `format=json2`
+ type: object
+ properties:
+ api:
+ $ref: '#/components/schemas/api'
+ agents:
+ $ref: '#/components/schemas/agents'
+ versions:
+ $ref: '#/components/schemas/versions'
+ summary:
+ description: |
+ Summarized information about nodes, contexts, instances, labels, alerts, and dimensions. The items returned are determined by the scope of the query only, however the statistical data in them are influenced by the filters of the query. Using this information the dashboard allows users to slice and dice the data by filtering and grouping.
+ type: object
+ properties:
+ nodes:
+ type: array
+ items:
+ $ref: '#/components/schemas/nodeWithDataStatistics'
+ contexts:
+ type: array
+ items:
+ type: object
+ description: |
+ An object describing a unique context. `is` stands for instances, `ds` for dimensions, `al` for alerts, `sts` for statistics.
+ properties:
+ id:
+ description: the context id.
+ type: string
+ is:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ ds:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ al:
+ $ref: "#/components/schemas/jsonwrap2_alerts_count"
+ sts:
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ instances:
+ type: array
+ items:
+ type: object
+ description: |
+ An object describing an instance. `ds` stands for dimensions, `al` for alerts, `sts` for statistics.
+ properties:
+ id:
+ description: the id of the instance.
+ type: string
+ nm:
+                description: the name of the instance (may be absent when it is the same as the id)
+ type: string
+ ni:
+                description: the node index id this instance belongs to. The UI uses this to compose the fully qualified name of the instance, using the node hostname to present it to users and its machine guid to add it to filters.
+                type: integer
+ ds:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ al:
+ $ref: "#/components/schemas/jsonwrap2_alerts_count"
+ sts:
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ dimensions:
+ type: array
+ items:
+ type: object
+ description: |
+ An object describing a unique dimension. `ds` stands for `dimensions`, `sts` for statistics.
+ properties:
+ id:
+ description: the id of the dimension.
+ type: string
+ nm:
+                description: the name of the dimension (may be absent when it is the same as the id)
+ type: string
+ ds:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ sts:
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ labels:
+ type: array
+ items:
+ type: object
+ description: |
+ An object describing a label key. `ds` stands for `dimensions`, `sts` for statistics.
+ properties:
+ id:
+ description: the key of the label.
+ type: string
+ ds:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ sts:
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ vl:
+ description: |
+ An array of values for this key.
+ type: array
+ items:
+ type: object
+ properties:
+ id:
+ description: The value string
+ type: string
+ ds:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ sts:
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ alerts:
+ description: |
+ An array of all the unique alerts running, grouped by alert name (`nm` is available here)
+ type: array
+ items:
+ $ref: "#/components/schemas/jsonwrap2_alerts_count"
+ totals:
+ type: object
+ properties:
+ nodes:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ contexts:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ instances:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ dimensions:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ label_keys:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ label_key_values:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ functions:
+ type: array
+ items:
+ type: string
+ db:
+ type: object
+ properties:
+ tiers:
+ description: |
+ The number of tiers this server is using.
+ type: integer
+ update_every:
+ description: |
+ The minimum update every, in seconds, for all tiers and all metrics aggregated into this query.
+ type: integer
+ first_entry:
+ description: |
+ The minimum unix epoch timestamp of the retention across all tiers for all metrics aggregated into this query.
+ type: integer
+ last_entry:
+ description: |
+            The maximum unix epoch timestamp of the retention across all tiers for all metrics aggregated into this query.
+ type: integer
+ per_tier:
+ description: |
+ An array with information for each of the tiers available, related to this query.
+ type: array
+ items:
+ type: object
+ properties:
+ tier:
+ description: |
+ The tier number of this tier, starting at 0.
+ type: integer
+ queries:
+ description: |
+                  The number of queries executed on this tier. Usually one query per metric is made, but the query may cross multiple tiers, in which case more than one query per metric is made.
+ type: integer
+ points:
+ description: |
+ The number of points read from this tier.
+ type: integer
+ update_every:
+ description: |
+ The minimum resolution of all metrics queried on this tier.
+ type: integer
+ first_entry:
+ description: |
+ The minimum unix epoch timestamp available across all metrics that used this tier. This reflects the oldest timestamp of the tier's retention.
+ type: integer
+ last_entry:
+ description: |
+                  The maximum unix epoch timestamp available across all metrics that used this tier. This reflects the newest timestamp of the tier's retention.
+                type: integer
+ units:
+ description: |
+ The units of the database data
+ oneOf:
+ - type: string
+ - type: array
+ items:
+ type: string
+ dimensions:
+ type: object
+ properties:
+ ids:
+ description: |
+            An array with the dimension ids that uniquely identify the dimensions for this query. It is the same as `view.dimensions.ids`.
+ type: array
+ items:
+ type: string
+ units:
+ description: |
+ An array with the units each dimension has in the database (independent of group-by aggregation that may override the units).
+ type: array
+ items:
+ type: string
+ sts:
+ description: |
+ Statistics about the data collection points used for each dimension.
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ view:
+ type: object
+ properties:
+ title:
+ description: |
+ The title the chart should have.
+ type: string
+ format:
+ description: |
+            The format the `result` top level member has. Available only when the `debug` flag is set.
+ type: string
+ options:
+ description: |
+            An array presenting all the options given to the query. Available only when the `debug` flag is set.
+ type: array
+ items:
+ type: string
+ time_group:
+ description: |
+            The same as the parameter `time_group`. Available only when the `debug` flag is set.
+ type: string
+ after:
+ description: |
+ The oldest unix epoch timestamp of the data returned in the `result`.
+ type: integer
+ before:
+ description: |
+ The newest unix epoch timestamp of the data returned in the `result`.
+ type: integer
+ partial_data_trimming:
+ description: |
+ Information related to trimming of the last few points of the `result`, that was required to remove (increasing) partial data.
+ Trimming is disabled when the `raw` option is given to the query.
+ This object is available only when the `debug` flag is set.
+ type: object
+ properties:
+ max_update_every:
+ description: |
+ The maximum `update_every` for all metrics aggregated into the query.
+ Trimming is by default enabled at `view.before - max_update_every`, but only when `view.before >= now - max_update_every`.
+ type: integer
+ expected_after:
+              description: |
+                The timestamp at which trimming can be enabled.
+                If this timestamp is greater than or equal to `view.before`, there is no trimming.
+              type: integer
+            trimmed_after:
+              description: |
+                The timestamp at which trimming has been applied.
+                If this timestamp is greater than or equal to `view.before`, there is no trimming.
+              type: integer
+ points:
+ description: |
+ The number of points in `result`. Available only when `raw` is given.
+ type: integer
+ units:
+ description: |
+ The units of the query.
+ oneOf:
+ - type: string
+ - type: array
+ items:
+ type: string
+ chart_type:
+ description: |
+ The default chart type of the query.
+ type: string
+ enum:
+ - line
+ - area
+ - stacked
+ dimensions:
+ description: |
+ Detailed information about the chart dimensions included in the `result`.
+ type: object
+ properties:
+ grouped_by:
+ description: |
+ An array with the order of the groupings performed.
+ type: array
+ items:
+ type: string
+ enum:
+ - selected
+ - dimension
+ - instance
+ - node
+ - context
+ - units
+ - "label:key1"
+ - "label:key2"
+ - "label:keyN"
+ ids:
+ description: |
+ An array with the dimension ids that uniquely identify the dimensions for this query.
+ type: array
+ items:
+ type: string
+ names:
+ description: |
+ An array with the dimension names to be presented to users. Names may be overlapping, but IDs are not.
+ type: array
+ items:
+ type: string
+ priorities:
+ description: |
+ An array with the relative priorities of the dimensions.
+ Numbers may not be sequential or unique. The application is expected to order by this and then by name.
+ type: array
+ items:
+ type: integer
+ aggregated:
+ description: |
+ An array with the number of source metrics aggregated into each dimension.
+ type: array
+ items:
+ type: integer
+ units:
+ description: |
+ An array with the units each dimension has.
+ type: array
+ items:
+ type: string
+ sts:
+ description: |
+ Statistics about the view points for each dimension.
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ labels:
+ description: |
+ The labels associated with each dimension in the query.
+ This object is only available when the `group-by-labels` option is given to the query.
+ type: object
+ properties:
+ label_key1:
+ description: |
+ An array having one entry for each of the dimensions of the query.
+ type: array
+ items:
+ description: |
+ An array having one entry for each of the values this label key has for the given dimension.
+ type: array
+ items:
+ type: string
+ min:
+ description: |
+ The minimum value of all points included in the `result`.
+ type: number
+ max:
+ description: |
+ The maximum value of all points included in the `result`.
+ type: number
+ result:
+ $ref: '#/components/schemas/data_json_formats2'
+ timings:
+ type: object
+ jsonwrap2_sts:
+ description: |
+ Statistical values
+ type: object
+ properties:
+ min:
+ description: The minimum value of all metrics aggregated
+ type: number
+ max:
+ description: The maximum value of all metrics aggregated
+ type: number
+ avg:
+ description: The average value of all metrics aggregated
+ type: number
+ arp:
+ description: The average anomaly rate of all metrics aggregated
+ type: number
+ con:
+ description: The contribution percentage of all the metrics aggregated
+ type: number
+ jsonwrap2_sts_raw:
+ description: |
+ Statistical values when `raw` option is given.
+ type: object
+ properties:
+ min:
+ description: The minimum value of all metrics aggregated
+ type: number
+ max:
+ description: The maximum value of all metrics aggregated
+ type: number
+ sum:
+ description: The sum value of all metrics aggregated
+ type: number
+ ars:
+ description: The sum anomaly rate of all metrics aggregated
+ type: number
+ vol:
+ description: The volume of all the metrics aggregated
+ type: number
+ cnt:
+ description: The count of all metrics aggregated
+ type: integer
+ jsonwrap2_items_count:
+ description: |
+ Depending on the placement of this object, `items` may be `nodes`, `contexts`, `instances`, `dimensions`, `label keys`, `label key-value pairs`. Furthermore, if the whole object is missing it should be assumed that all its members are zero.
+ type: object
+ properties:
+ sl:
+ description: The number of items `selected` to query. If absent it is zero.
+ type: integer
+ ex:
+ description: The number of items `excluded` from querying. If absent it is zero.
+ type: integer
+ qr:
+ description: The number of items (out of `selected`) the query successfully `queried`. If absent it is zero.
+ type: integer
+ fl:
+ description: The number of items (from `selected`) that `failed` to be queried. If absent it is zero.
+ type: integer
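+    # Illustrative only, not part of the spec: a counts object like
+    #   {"sl": 10, "ex": 2, "qr": 9, "fl": 1}
+    # reads: 12 items matched the scope, 10 were selected, 9 were queried
+    # successfully and 1 failed.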
+ jsonwrap2_alerts_count:
+ description: |
+ Counters about alert statuses. If this object is missing, it is assumed that all its members are zero.
+ type: object
+ properties:
+ nm:
+          description: The name of the alert. Can be absent when the counters refer to more than one alert instance.
+ type: string
+ cl:
+ description: The number of CLEAR alerts. If absent, it is zero.
+ type: integer
+ wr:
+ description: The number of WARNING alerts. If absent, it is zero.
+ type: integer
+ cr:
+ description: The number of CRITICAL alerts. If absent, it is zero.
+ type: integer
+ ot:
+ description: |
+ The number of alerts that are not CLEAR, WARNING, CRITICAL (so, they are "other"). If absent, it is zero.
+ type: integer
+ api:
+ description: The version of the API used.
+ type: integer
+ agents:
+ description: |
+ An array of agent definitions consulted to compose this response.
+ type: array
+ items:
+ type: object
+ properties:
+ mg:
+ description: The agent machine GUID.
+ type: string
+ format: uuid
+ nd:
+ description: The agent cloud node ID.
+ type: string
+ format: uuid
+ nm:
+ description: The agent hostname.
+ type: string
+ ai:
+ description: The agent index ID for this agent, in this response.
+ type: integer
+ now:
+ description: The current unix epoch timestamp of this agent.
+ type: integer
+ versions:
+ description: |
+ Hashes that allow the caller to detect important database changes of Netdata agents.
+ type: object
+ properties:
+ nodes_hard_hash:
+ description: |
+            An auto-increment value that reflects the number of changes to the number of nodes maintained by the server. Every time a node is added or removed, this number gets incremented.
+ type: integer
+ contexts_hard_hash:
+ description: |
+            An auto-increment value that reflects the number of changes to the number of contexts maintained by the server. Every time a context is added or removed, this number gets incremented.
+ type: integer
+ contexts_soft_hash:
+ description: |
+            An auto-increment value that reflects the number of changes to the queue that sends context updates to Netdata Cloud. Every time the contents of a context are updated, this number gets incremented.
+ type: integer
+ alerts_hard_hash:
+ description: |
+            An auto-increment value that reflects the number of changes to the number of alerts. Every time an alert is added or removed, this number gets incremented.
+ type: integer
+ alerts_soft_hash:
+ description: |
+            An auto-increment value that reflects the number of alert transitions. Every time an alert transitions to a new state, this number gets incremented.
+ type: integer
+ nodeBasic:
+ type: object
+ description: Basic information about a node.
+ required:
+ - ni
+ - st
+ properties:
+ mg:
+ description: The machine guid of the node. May not be available if the request is served by the Netdata Cloud.
+ type: string
+ format: UUID
+ nd:
+ description: The node id of the node. May not be available if the node is not registered to Netdata Cloud.
+ type: string
+ format: UUID
+ nm:
+ description: The name (hostname) of the node.
+ type: string
+ ni:
+ description: The node index id, a number that uniquely identifies this node for this query.
+ type: integer
+ st:
+ description: Status information about the communication with this node.
+ type: object
+ properties:
+ ai:
+ description: The agent index id that has been contacted for this node.
+ type: integer
+ code:
+ description: The HTTP response code of the response for this node. When working directly with an agent, this is always 200. If the `code` is missing, it should be assumed to be 200.
+ type: integer
+ msg:
+ description: A human readable description of the error, if any. If `msg` is missing, or is the empty string `""` or is `null`, there is no description associated with the current status.
+ type: string
+ ms:
+ description: The time in milliseconds this node took to respond, or if the local agent responded for this node, the time it needed to execute the query. If `ms` is missing, the time that was required to query this node is unknown.
+ type: number
+ nodeWithDataStatistics:
+ allOf:
+ - $ref: '#/components/schemas/nodeBasic'
+ - type: object
+ description: |
+ `is` stands for instances, `ds` for dimensions, `al` for alerts, `sts` for statistics.
+ properties:
+ is:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ ds:
+ $ref: "#/components/schemas/jsonwrap2_items_count"
+ al:
+ $ref: "#/components/schemas/jsonwrap2_alerts_count"
+ sts:
+ oneOf:
+ - $ref: "#/components/schemas/jsonwrap2_sts"
+ - $ref: "#/components/schemas/jsonwrap2_sts_raw"
+ nodeFull:
+ allOf:
+ - $ref: '#/components/schemas/nodeBasic'
+ - type: object
+ properties:
+ version:
+ description: The version of the Netdata Agent the node runs.
+ type: string
+ hops:
+            description: How many hops away from the origin node the queried node is. 0 means the agent itself is the origin node.
+ type: integer
+ state:
+ description: The current state of the node on this agent.
+ type: string
+ enum:
+ - reachable
+ - stale
+ - offline
+ context2Basic:
+ type: object
+ properties:
+ family:
+ type: string
+ priority:
+ type: integer
+ first_entry:
+ type: integer
+ last_entry:
+ type: integer
+ live:
+ type: boolean
+ contexts2:
+ description: |
+ `/api/v2/contexts` and `/api/v2/q` response about multi-node contexts hosted by a Netdata agent.
+ type: object
+ properties:
+ api:
+ $ref: '#/components/schemas/api'
+ agents:
+ $ref: '#/components/schemas/agents'
+ versions:
+ $ref: '#/components/schemas/versions'
+ contexts:
+ additionalProperties:
+ $ref: '#/components/schemas/context2Basic'
+ jsonwrap1:
type: object
discriminator:
propertyName: format
@@ -2007,7 +2618,7 @@ components:
properties:
api:
type: number
- description: The API version this conforms to, currently 1.
+ description: The API version this conforms to.
id:
type: string
description: The unique id of the chart.
@@ -2080,101 +2691,132 @@ components:
chart_variables:
type: object
additionalProperties:
- $ref: "#/components/schemas/chart_variables"
+ $ref: '#/components/schemas/chart_variables'
+ result:
+ $ref: '#/components/schemas/data_json_formats1'
+ data_json_formats1:
+ description: |
+ Depending on the `format` given to a data query, any of the following may be returned.
+ oneOf:
+ - $ref: '#/components/schemas/data_json'
+ - $ref: '#/components/schemas/data_datatable'
+ - $ref: '#/components/schemas/data_csvjsonarray'
+ - $ref: '#/components/schemas/data_array'
+ - $ref: '#/components/schemas/data_txt'
+ data_json_formats2:
+ description: |
+ Depending on the `format` given to a data query, any of the following may be returned.
+ oneOf:
+ - $ref: '#/components/schemas/data_json2'
+ - $ref: '#/components/schemas/data_json_formats1'
+ data_json2:
+ type: object
+ properties:
+ labels:
+ description: |
+ The IDs of the dimensions returned. The first is always `time`.
+ type: array
+ items:
+ type: string
+ point:
+ description: |
+ The format of each point returned.
+ type: object
+ properties:
+ value:
+ description: |
+ The index of the value in each point.
+ type: integer
+ arp:
+ description: |
+ The index of the anomaly rate in each point.
+ type: integer
+ pa:
+ description: |
+ The index of the point annotations in each point.
+ This is a bitmap. `EMPTY = 1`, `RESET = 2`, `PARTIAL = 4`.
+ `EMPTY` means the point has no value.
+ `RESET` means that at least one metric aggregated experienced an overflow (a counter that wrapped).
+ `PARTIAL` means that this point should have more metrics aggregated into it, but not all metrics had data.
+ type: integer
+ count:
+ description: |
+ The number of metrics aggregated into this point. This exists only when the option `raw` is given to the query.
+ type: integer
+ data:
+ type: array
+ items:
+ allOf:
+ - type: integer
+ - type: array
data_json:
- description: Data response in json format.
- allOf:
- - $ref: "#/components/schemas/data"
- - properties:
- result:
- type: object
- properties:
- labels:
- description: The dimensions retrieved from the chart.
- type: array
- items:
- type: string
- data:
- description: The data requested, one element per sample with each element
- containing the values of the dimensions described in the
- labels value.
- type: array
- items:
- type: number
- description: The result requested, in the format requested.
- data_flat:
- description: Data response in csv / tsv / tsv-excel / ssv / ssv-comma / markdown /
- html formats.
- allOf:
- - $ref: "#/components/schemas/data"
- - properties:
- result:
- type: string
+ description: Data response in `json` format.
+ type: object
+ properties:
+ labels:
+ description: The dimensions retrieved from the chart.
+ type: array
+ items:
+ type: string
+ data:
+ description: |
+ The data requested, one element per sample with each element containing the values of the dimensions described in the labels value.
+ type: array
+ items:
+ type: number
+ data_txt:
+ description: |
+ Data response in `csv`, `tsv`, `tsv-excel`, `ssv`, `ssv-comma`, `markdown`, `html` formats.
+ type: string
data_array:
- description: Data response in array format.
- allOf:
- - $ref: "#/components/schemas/data"
- - properties:
- result:
- type: array
- items:
- type: number
+ description: Data response in `array` format.
+ type: array
+ items:
+ type: number
data_csvjsonarray:
- description: Data response in csvjsonarray format.
- allOf:
- - $ref: "#/components/schemas/data"
- - properties:
- result:
- description: The first inner array contains strings showing the labels of
- each column, each subsequent array contains the values for each
- point in time.
- type: array
- items:
- type: array
- items: {}
+ description: |
+ The first inner array contains strings showing the labels of each column, each subsequent array contains the values for each point in time.
+ type: array
+ items:
+ type: array
+ items: {}
data_datatable:
- description: Data response in datatable / datasource formats (suitable for Google
- Charts).
- allOf:
- - $ref: "#/components/schemas/data"
- - properties:
- result:
- type: object
- properties:
- cols:
- type: array
- items:
- type: object
- properties:
- id:
- description: Always empty - for future use.
- label:
- description: The dimension returned from the chart.
- pattern:
- description: Always empty - for future use.
- type:
- description: The type of data in the column / chart-dimension.
- p:
- description: Contains any annotations for the column.
- required:
- - id
- - label
- - pattern
- - type
- rows:
- type: array
- items:
- type: object
- properties:
- c:
- type: array
- items:
- properties:
- v:
- description: "Each value in the row is represented by an
- object named `c` with five v fields: data, null,
- null, 0, the value. This format is fixed by the
- Google Charts API."
+ description: |
+ Data response in datatable / datasource formats (suitable for Google Charts).
+ type: object
+ properties:
+ cols:
+ type: array
+ items:
+ type: object
+ properties:
+ id:
+ description: Always empty - for future use.
+ label:
+ description: The dimension returned from the chart.
+ pattern:
+ description: Always empty - for future use.
+ type:
+ description: The type of data in the column / chart-dimension.
+ p:
+ description: Contains any annotations for the column.
+ required:
+ - id
+ - label
+ - pattern
+ - type
+ rows:
+ type: array
+ items:
+ type: object
+ properties:
+ c:
+ type: array
+ items:
+ properties:
+ v:
+ description: |
+ Each value in the row is represented by an object named `c` with five v fields: data, null, null, 0, the value. This format is fixed by the Google Charts API.
alarms:
type: object
properties:
@@ -2419,9 +3061,8 @@ components:
properties:
aclk-available:
type: string
- description: "Describes whether this agent is capable of connection to the Cloud.
- False means agent has been built without ACLK component either on purpose (user choice)
- or due to missing dependency."
+ description: |
+ Describes whether this agent is capable of connecting to the Cloud. False means the agent has been built without the ACLK component, either on purpose (user choice) or due to a missing dependency.
aclk-version:
type: integer
description: Describes which ACLK version is currently used.
@@ -2529,6 +3170,8 @@ components:
type: number
dimension2-name:
type: number
+ weights2:
+ type: object
weights:
type: object
properties:
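The `pa` point-annotation bitmap documented in the `data_json2` schema above (`EMPTY = 1`, `RESET = 2`, `PARTIAL = 4`) is a plain bitmask that API consumers are expected to decode bit by bit. A minimal C sketch of such a consumer-side decoder; the constant and function names are illustrative and not part of the Netdata sources:

```c
#include <stdio.h>

/* Bit values copied from the data_json2 point annotations schema above. */
#define ANNOT_EMPTY   1   /* the point has no value */
#define ANNOT_RESET   2   /* at least one aggregated counter wrapped */
#define ANNOT_PARTIAL 4   /* not all metrics had data for this point */

/* Hypothetical consumer-side helper: print the annotations of one point. */
static void print_point_annotations(int pa) {
    if (pa & ANNOT_EMPTY)   printf("empty ");
    if (pa & ANNOT_RESET)   printf("reset ");
    if (pa & ANNOT_PARTIAL) printf("partial ");
    if (!pa)                printf("none");
    printf("\n");
}

int main(void) {
    print_point_annotations(ANNOT_RESET | ANNOT_PARTIAL); /* prints: reset partial */
    return 0;
}
```

A point with `pa == 6`, for example, reports both a counter reset and partial aggregation.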
diff --git a/web/api/queries/README.md b/web/api/queries/README.md
index 2a17ac784..dacd2900e 100644
--- a/web/api/queries/README.md
+++ b/web/api/queries/README.md
@@ -1,11 +1,10 @@
-<!--
-title: "Database Queries"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/README.md
--->
+# Database queries/lookup
-# Database Queries
+This document explains in detail the options available to retrieve data from the Netdata time-series database, in order to configure alerts, create badges, or
+create custom charts.
-Netdata database can be queried with `/api/v1/data` and `/api/v1/badge.svg` REST API methods.
+The Netdata database can be queried with the `/api/v1/data` and `/api/v1/badge.svg` REST API methods. The database is also queried from the `lookup` line
+in an [alert configuration](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md).
Every data query accepts the following parameters:
@@ -104,18 +103,24 @@ For each value it calls the **grouping method** given with the `&group=` query p
The following grouping methods are supported. These are given all the values in the time-frame
and they group the values every `group points`.
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min&value_color=blue) finds the minimum value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average&value_color=yellow) finds the average value
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=sum&after=-60&label=sum&units=requests&value_color=orange) adds all the values and returns the sum
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=ses&after=-60&label=ses&value_color=brown) finds the exponential weighted moving average of the values
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
-- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference of the last vs the first value
-
-The examples shown above, are live information from the `successful` web requests of the global Netdata registry.
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=min&after=-60&label=min&value_color=blue) finds the minimum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=max&after=-60&label=max&value_color=lightblue) finds the maximum value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=average&after=-60&label=average&value_color=yellow) finds the average value
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=sum&units=kilobits&after=-60&label=sum&value_color=orange) adds all the values and returns the sum
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=median&after=-60&label=median&value_color=red) sorts the values and returns the value in the middle of the list
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=stddev&after=-60&label=stddev&value_color=green) finds the standard deviation of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=cv&after=-60&label=cv&units=pcent&value_color=yellow) finds the relative standard deviation (coefficient of variation) of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=ses&after=-60&label=ses&value_color=brown) finds the exponential weighted moving average of the values
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=des&after=-60&label=des&value_color=blue) applies Holt-Winters double exponential smoothing
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=net.eth0&options=unaligned&dimensions=received&group=incremental_sum&after=-60&label=incremental_sum&value_color=red) finds the difference of the last vs the first value
+
+The badges above show live information from the `received` traffic on the `eth0` interface of the global Netdata registry.
+Inspect any of the badges to see the parameters they use. You can issue the same request directly to the registry server's API, e.g. by
+passing the following to get the value shown on the badge for the sum of the values within the period:
+
+```
+https://registry.my-netdata.io/api/v1/data?chart=net.eth0&options=unaligned&dimensions=received&group=sum&units=kilobits&after=-60&label=sum&points=1
+```
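
To make the grouping step concrete, here is a minimal, self-contained C sketch of what happens when more values were collected than points requested; the names and numbers are made up for illustration (9 collected values reduced to 3 returned points with `group=average`):

```c
#include <stdio.h>
#include <stddef.h>

/* Illustrative reduction: each returned point aggregates
 * group_points = 3 collected values with the selected grouping
 * method (here: average). */
static double group_average(const double *v, size_t n) {
    double sum = 0.0;
    for (size_t i = 0; i < n; i++)
        sum += v[i];
    return sum / (double)n;
}

int main(void) {
    const double collected[9] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    const size_t group_points = 3;

    for (size_t i = 0; i < 9; i += group_points)
        printf("point %zu = %g\n", i / group_points,
               group_average(&collected[i], group_points));
    /* prints: point 0 = 2, point 1 = 5, point 2 = 8 */
    return 0;
}
```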
## Further processing
diff --git a/web/api/queries/average/README.md b/web/api/queries/average/README.md
index b8d4ba7e7..c9aa402cb 100644
--- a/web/api/queries/average/README.md
+++ b/web/api/queries/average/README.md
@@ -1,6 +1,10 @@
<!--
title: "Average or Mean"
+sidebar_label: "Average or Mean"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/average/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Average or Mean
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
index 0719d57fa..f54dcb243 100644
--- a/web/api/queries/average/average.c
+++ b/web/api/queries/average/average.c
@@ -2,58 +2,3 @@
#include "average.h"
-// ----------------------------------------------------------------------------
-// average
-
-struct grouping_average {
- NETDATA_DOUBLE sum;
- size_t count;
-};
-
-void grouping_create_average(RRDR *r, const char *options __maybe_unused) {
- r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_average(RRDR *r) {
- struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
- g->sum = 0;
- g->count = 0;
-}
-
-void grouping_free_average(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_average(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
- g->sum += value;
- g->count++;
-}
-
-NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else {
- if(unlikely(r->internal.resampling_group != 1)) {
- if (unlikely(r->result_options & RRDR_RESULT_OPTION_VARIABLE_STEP))
- value = g->sum / g->count / r->internal.resampling_divisor;
- else
- value = g->sum / r->internal.resampling_divisor;
- } else
- value = g->sum / g->count;
- }
-
- g->sum = 0.0;
- g->count = 0;
-
- return value;
-}
diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h
index b31966886..2d77cc571 100644
--- a/web/api/queries/average/average.h
+++ b/web/api/queries/average/average.h
@@ -6,10 +6,57 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_average(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_average(RRDR *r);
-void grouping_free_average(RRDR *r);
-void grouping_add_average(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// ----------------------------------------------------------------------------
+// average
+
+struct tg_average {
+ NETDATA_DOUBLE sum;
+ size_t count;
+};
+
+static inline void tg_average_create(RRDR *r, const char *options __maybe_unused) {
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_average));
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_average_reset(RRDR *r) {
+ struct tg_average *g = (struct tg_average *)r->time_grouping.data;
+ g->sum = 0;
+ g->count = 0;
+}
+
+static inline void tg_average_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_average_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_average *g = (struct tg_average *)r->time_grouping.data;
+ g->sum += value;
+ g->count++;
+}
+
+static inline NETDATA_DOUBLE tg_average_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_average *g = (struct tg_average *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ if(unlikely(r->time_grouping.resampling_group != 1))
+ value = g->sum / r->time_grouping.resampling_divisor;
+ else
+ value = g->sum / g->count;
+ }
+
+ g->sum = 0.0;
+ g->count = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERY_AVERAGE_H
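Worth noting: compared to the removed `average.c` implementation, the flush above no longer special-cases `RRDR_RESULT_OPTION_VARIABLE_STEP`; whenever `resampling_group != 1` the sum is divided by the resampling divisor rather than the sample count. A standalone sketch of that arithmetic, with made-up numbers:

```c
#include <stdio.h>
#include <stddef.h>

/* Made-up numbers: a group of 4 samples and a resampling divisor of 2.
 * When resampling_group != 1, the flush above returns sum / divisor;
 * otherwise it returns the plain average sum / count. */
int main(void) {
    const double samples[4] = { 10, 20, 30, 40 };
    double sum = 0.0;
    size_t count = 0;

    for (size_t i = 0; i < 4; i++) {
        sum += samples[i];
        count++;
    }

    double resampling_divisor = 2.0;
    printf("plain average: %g\n", sum / (double)count);      /* 25 */
    printf("resampled:     %g\n", sum / resampling_divisor); /* 50 */
    return 0;
}
```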
diff --git a/web/api/queries/countif/README.md b/web/api/queries/countif/README.md
index 200a4c9ed..37b3f6423 100644
--- a/web/api/queries/countif/README.md
+++ b/web/api/queries/countif/README.md
@@ -1,6 +1,10 @@
<!--
title: "CountIf"
+sidebar_label: "CountIf"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/countif/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# CountIf
diff --git a/web/api/queries/countif/countif.c b/web/api/queries/countif/countif.c
index 369b20be9..8a3a1f50b 100644
--- a/web/api/queries/countif/countif.c
+++ b/web/api/queries/countif/countif.c
@@ -5,132 +5,3 @@
// ----------------------------------------------------------------------------
// countif
-struct grouping_countif {
- size_t (*comparison)(NETDATA_DOUBLE, NETDATA_DOUBLE);
- NETDATA_DOUBLE target;
- size_t count;
- size_t matched;
-};
-
-static size_t countif_equal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v == target);
-}
-
-static size_t countif_notequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v != target);
-}
-
-static size_t countif_less(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v < target);
-}
-
-static size_t countif_lessequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v <= target);
-}
-
-static size_t countif_greater(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v > target);
-}
-
-static size_t countif_greaterequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
- return (v >= target);
-}
-
-void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
- struct grouping_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_countif));
- r->internal.grouping_data = g;
-
- if(options && *options) {
- // skip any leading spaces
- while(isspace(*options)) options++;
-
- // find the comparison function
- switch(*options) {
- case '!':
- options++;
- if(*options != '=' && *options != ':')
- options--;
- g->comparison = countif_notequal;
- break;
-
- case '>':
- options++;
- if(*options == '=' || *options == ':') {
- g->comparison = countif_greaterequal;
- }
- else {
- options--;
- g->comparison = countif_greater;
- }
- break;
-
- case '<':
- options++;
- if(*options == '>') {
- g->comparison = countif_notequal;
- }
- else if(*options == '=' || *options == ':') {
- g->comparison = countif_lessequal;
- }
- else {
- options--;
- g->comparison = countif_less;
- }
- break;
-
- default:
- case '=':
- case ':':
- g->comparison = countif_equal;
- break;
- }
- if(*options) options++;
-
- // skip everything up to the first digit
- while(isspace(*options)) options++;
-
- g->target = str2ndd(options, NULL);
- }
- else {
- g->target = 0.0;
- g->comparison = countif_equal;
- }
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_countif(RRDR *r) {
- struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
- g->matched = 0;
- g->count = 0;
-}
-
-void grouping_free_countif(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
- g->matched += g->comparison(value, g->target);
- g->count++;
-}
-
-NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else {
- value = (NETDATA_DOUBLE)g->matched * 100 / (NETDATA_DOUBLE)g->count;
- }
-
- g->matched = 0;
- g->count = 0;
-
- return value;
-}
diff --git a/web/api/queries/countif/countif.h b/web/api/queries/countif/countif.h
index dfe805658..896b9d873 100644
--- a/web/api/queries/countif/countif.h
+++ b/web/api/queries/countif/countif.h
@@ -6,10 +6,143 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_countif(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_countif(RRDR *r);
-void grouping_free_countif(RRDR *r);
-void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+enum tg_countif_cmp {
+ TG_COUNTIF_EQUAL,
+ TG_COUNTIF_NOTEQUAL,
+ TG_COUNTIF_LESS,
+ TG_COUNTIF_LESSEQUAL,
+ TG_COUNTIF_GREATER,
+ TG_COUNTIF_GREATEREQUAL,
+};
+
+struct tg_countif {
+ enum tg_countif_cmp comparison;
+ NETDATA_DOUBLE target;
+ size_t count;
+ size_t matched;
+};
+
+static inline void tg_countif_create(RRDR *r, const char *options __maybe_unused) {
+ struct tg_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_countif));
+ r->time_grouping.data = g;
+
+ if(options && *options) {
+ // skip any leading spaces
+ while(isspace(*options)) options++;
+
+ // find the comparison function
+ switch(*options) {
+ case '!':
+ options++;
+ if(*options != '=' && *options != ':')
+ options--;
+ g->comparison = TG_COUNTIF_NOTEQUAL;
+ break;
+
+ case '>':
+ options++;
+ if(*options == '=' || *options == ':') {
+ g->comparison = TG_COUNTIF_GREATEREQUAL;
+ }
+ else {
+ options--;
+ g->comparison = TG_COUNTIF_GREATER;
+ }
+ break;
+
+ case '<':
+ options++;
+ if(*options == '>') {
+ g->comparison = TG_COUNTIF_NOTEQUAL;
+ }
+ else if(*options == '=' || *options == ':') {
+ g->comparison = TG_COUNTIF_LESSEQUAL;
+ }
+ else {
+ options--;
+ g->comparison = TG_COUNTIF_LESS;
+ }
+ break;
+
+ default:
+ case '=':
+ case ':':
+ g->comparison = TG_COUNTIF_EQUAL;
+ break;
+ }
+ if(*options) options++;
+
+ // skip any spaces before the target number
+ while(isspace(*options)) options++;
+
+ g->target = str2ndd(options, NULL);
+ }
+ else {
+ g->target = 0.0;
+ g->comparison = TG_COUNTIF_EQUAL;
+ }
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_countif_reset(RRDR *r) {
+ struct tg_countif *g = (struct tg_countif *)r->time_grouping.data;
+ g->matched = 0;
+ g->count = 0;
+}
+
+static inline void tg_countif_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_countif_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_countif *g = (struct tg_countif *)r->time_grouping.data;
+ switch(g->comparison) {
+ case TG_COUNTIF_GREATER:
+ if(value > g->target) g->matched++;
+ break;
+
+ case TG_COUNTIF_GREATEREQUAL:
+ if(value >= g->target) g->matched++;
+ break;
+
+ case TG_COUNTIF_LESS:
+ if(value < g->target) g->matched++;
+ break;
+
+ case TG_COUNTIF_LESSEQUAL:
+ if(value <= g->target) g->matched++;
+ break;
+
+ case TG_COUNTIF_EQUAL:
+ if(value == g->target) g->matched++;
+ break;
+
+ case TG_COUNTIF_NOTEQUAL:
+ if(value != g->target) g->matched++;
+ break;
+ }
+ g->count++;
+}
+
+static inline NETDATA_DOUBLE tg_countif_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_countif *g = (struct tg_countif *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = (NETDATA_DOUBLE)g->matched * 100 / (NETDATA_DOUBLE)g->count;
+ }
+
+ g->matched = 0;
+ g->count = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERY_COUNTIF_H
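The flush above returns the share of matching samples as a percentage of the group. A self-contained sketch of the `countif` semantics, hard-coding the `>=3` comparison instead of parsing an options string; names and data are illustrative:

```c
#include <stdio.h>
#include <stddef.h>

/* countif ">=3" over { 1, 2, 3, 4 }: two of four samples match. */
int main(void) {
    const double values[4] = { 1, 2, 3, 4 };
    const double target = 3.0;
    size_t matched = 0, count = 0;

    for (size_t i = 0; i < 4; i++) {
        if (values[i] >= target) matched++;
        count++;
    }

    printf("countif >=3: %g%%\n",
           (double)matched * 100.0 / (double)count); /* 50% */
    return 0;
}
```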
diff --git a/web/api/queries/des/README.md b/web/api/queries/des/README.md
index 33c5f1a0c..b12751a40 100644
--- a/web/api/queries/des/README.md
+++ b/web/api/queries/des/README.md
@@ -1,6 +1,10 @@
<!--
title: "double exponential smoothing"
+sidebar_label: "double exponential smoothing"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/des/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# double exponential smoothing
diff --git a/web/api/queries/des/des.c b/web/api/queries/des/des.c
index a6c4e4051..d0e234e23 100644
--- a/web/api/queries/des/des.c
+++ b/web/api/queries/des/des.c
@@ -6,132 +6,3 @@
// ----------------------------------------------------------------------------
// single exponential smoothing
-
-struct grouping_des {
- NETDATA_DOUBLE alpha;
- NETDATA_DOUBLE alpha_other;
- NETDATA_DOUBLE beta;
- NETDATA_DOUBLE beta_other;
-
- NETDATA_DOUBLE level;
- NETDATA_DOUBLE trend;
-
- size_t count;
-};
-
-static size_t max_window_size = 15;
-
-void grouping_init_des(void) {
- long long ret = config_get_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
- if(ret <= 1) {
- config_set_number(CONFIG_SECTION_WEB, "des max window", (long long)max_window_size);
- }
- else {
- max_window_size = (size_t) ret;
- }
-}
-
-static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_des *g) {
- (void)g;
-
- NETDATA_DOUBLE points;
- if(r->group == 1) {
- // provide a running DES
- points = (NETDATA_DOUBLE)r->internal.points_wanted;
- }
- else {
- // provide a SES with flush points
- points = (NETDATA_DOUBLE)r->group;
- }
-
- // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
- // A commonly used value for alpha is 2 / (N + 1)
- return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
-}
-
-static inline void set_alpha(RRDR *r, struct grouping_des *g) {
- // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
- // A commonly used value for alpha is 2 / (N + 1)
-
- g->alpha = 2.0 / (window(r, g) + 1.0);
- g->alpha_other = 1.0 - g->alpha;
-
- //info("alpha for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->alpha);
-}
-
-static inline void set_beta(RRDR *r, struct grouping_des *g) {
- // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
- // A commonly used value for alpha is 2 / (N + 1)
-
- g->beta = 2.0 / (window(r, g) + 1.0);
- g->beta_other = 1.0 - g->beta;
-
- //info("beta for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->beta);
-}
-
-void grouping_create_des(RRDR *r, const char *options __maybe_unused) {
- struct grouping_des *g = (struct grouping_des *)onewayalloc_mallocz(r->internal.owa, sizeof(struct grouping_des));
- set_alpha(r, g);
- set_beta(r, g);
- g->level = 0.0;
- g->trend = 0.0;
- g->count = 0;
- r->internal.grouping_data = g;
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_des(RRDR *r) {
- struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
- g->level = 0.0;
- g->trend = 0.0;
- g->count = 0;
-
- // fprintf(stderr, "\nDES: ");
-
-}
-
-void grouping_free_des(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
-
- if(likely(g->count > 0)) {
- // we have at least a number so far
-
- if(unlikely(g->count == 1)) {
- // the second value we got
- g->trend = value - g->trend;
- g->level = value;
- }
-
- // for the values, except the first
- NETDATA_DOUBLE last_level = g->level;
- g->level = (g->alpha * value) + (g->alpha_other * (g->level + g->trend));
- g->trend = (g->beta * (g->level - last_level)) + (g->beta_other * g->trend);
- }
- else {
- // the first value we got
- g->level = g->trend = value;
- }
-
- g->count++;
-
- //fprintf(stderr, "value: " CALCULATED_NUMBER_FORMAT ", level: " CALCULATED_NUMBER_FORMAT ", trend: " CALCULATED_NUMBER_FORMAT "\n", value, g->level, g->trend);
-}
-
-NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
-
- if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- return 0.0;
- }
-
- //fprintf(stderr, " RESULT for %zu values = " CALCULATED_NUMBER_FORMAT " \n", g->count, g->level);
-
- return g->level;
-}
diff --git a/web/api/queries/des/des.h b/web/api/queries/des/des.h
index 05fa01b34..3153d497c 100644
--- a/web/api/queries/des/des.h
+++ b/web/api/queries/des/des.h
@@ -6,12 +6,133 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_init_des(void);
+struct tg_des {
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE beta;
+ NETDATA_DOUBLE beta_other;
-void grouping_create_des(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_des(RRDR *r);
-void grouping_free_des(RRDR *r);
-void grouping_add_des(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ NETDATA_DOUBLE level;
+ NETDATA_DOUBLE trend;
+
+ size_t count;
+};
+
+static size_t tg_des_max_window_size = 15;
+
+static inline void tg_des_init(void) {
+ long long ret = config_get_number(CONFIG_SECTION_WEB, "des max window", (long long)tg_des_max_window_size);
+ if(ret <= 1) {
+ config_set_number(CONFIG_SECTION_WEB, "des max window", (long long)tg_des_max_window_size);
+ }
+ else {
+ tg_des_max_window_size = (size_t) ret;
+ }
+}
+
+static inline NETDATA_DOUBLE tg_des_window(RRDR *r, struct tg_des *g) {
+ (void)g;
+
+ NETDATA_DOUBLE points;
+ if(r->view.group == 1) {
+ // provide a running DES
+ points = (NETDATA_DOUBLE)r->time_grouping.points_wanted;
+ }
+ else {
+ // provide a SES with flush points
+ points = (NETDATA_DOUBLE)r->view.group;
+ }
+
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+ return (points > (NETDATA_DOUBLE)tg_des_max_window_size) ? (NETDATA_DOUBLE)tg_des_max_window_size : points;
+}
+
+static inline void tg_des_set_alpha(RRDR *r, struct tg_des *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+
+ g->alpha = 2.0 / (tg_des_window(r, g) + 1.0);
+ g->alpha_other = 1.0 - g->alpha;
+
+ //info("alpha for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->alpha);
+}
+
+static inline void tg_des_set_beta(RRDR *r, struct tg_des *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+
+ g->beta = 2.0 / (tg_des_window(r, g) + 1.0);
+ g->beta_other = 1.0 - g->beta;
+
+ //info("beta for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->beta);
+}
+
+static inline void tg_des_create(RRDR *r, const char *options __maybe_unused) {
+ struct tg_des *g = (struct tg_des *)onewayalloc_mallocz(r->internal.owa, sizeof(struct tg_des));
+ tg_des_set_alpha(r, g);
+ tg_des_set_beta(r, g);
+ g->level = 0.0;
+ g->trend = 0.0;
+ g->count = 0;
+ r->time_grouping.data = g;
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_des_reset(RRDR *r) {
+ struct tg_des *g = (struct tg_des *)r->time_grouping.data;
+ g->level = 0.0;
+ g->trend = 0.0;
+ g->count = 0;
+
+ // fprintf(stderr, "\nDES: ");
+
+}
+
+static inline void tg_des_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_des_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_des *g = (struct tg_des *)r->time_grouping.data;
+
+ if(likely(g->count > 0)) {
+ // we have at least a number so far
+
+ if(unlikely(g->count == 1)) {
+ // the second value we got
+ g->trend = value - g->trend;
+ g->level = value;
+ }
+
+ // for the values, except the first
+ NETDATA_DOUBLE last_level = g->level;
+ g->level = (g->alpha * value) + (g->alpha_other * (g->level + g->trend));
+ g->trend = (g->beta * (g->level - last_level)) + (g->beta_other * g->trend);
+ }
+ else {
+ // the first value we got
+ g->level = g->trend = value;
+ }
+
+ g->count++;
+
+ //fprintf(stderr, "value: " CALCULATED_NUMBER_FORMAT ", level: " CALCULATED_NUMBER_FORMAT ", trend: " CALCULATED_NUMBER_FORMAT "\n", value, g->level, g->trend);
+}
+
+static inline NETDATA_DOUBLE tg_des_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_des *g = (struct tg_des *)r->time_grouping.data;
+
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ return 0.0;
+ }
+
+ //fprintf(stderr, " RESULT for %zu values = " CALCULATED_NUMBER_FORMAT " \n", g->count, g->level);
+
+ return g->level;
+}
#endif //NETDATA_API_QUERIES_DES_H
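The level/trend recurrence above is standard Holt double exponential smoothing. A self-contained sketch with `alpha = beta = 0.5` for readability, mirroring the same seeding behavior (the first value seeds both level and trend, the second re-seeds them before the recurrence starts); the data is made up:

```c
#include <stdio.h>
#include <stddef.h>

int main(void) {
    const double v[5] = { 10, 12, 14, 16, 18 };
    const double alpha = 0.5, beta = 0.5;
    double level = 0.0, trend = 0.0;
    size_t count = 0;

    for (size_t i = 0; i < 5; i++) {
        if (count > 0) {
            if (count == 1) { trend = v[i] - trend; level = v[i]; }
            double last_level = level;
            /* the Holt recurrence, as in tg_des_add() above */
            level = alpha * v[i] + (1.0 - alpha) * (level + trend);
            trend = beta * (level - last_level) + (1.0 - beta) * trend;
        }
        else
            level = trend = v[i];
        count++;
    }

    printf("smoothed level: %g, trend: %g\n", level, trend);
    return 0;
}
```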
diff --git a/web/api/queries/incremental_sum/README.md b/web/api/queries/incremental_sum/README.md
index 44301172e..9b89f3188 100644
--- a/web/api/queries/incremental_sum/README.md
+++ b/web/api/queries/incremental_sum/README.md
@@ -1,6 +1,10 @@
<!--
title: "Incremental Sum (`incremental_sum`)"
+sidebar_label: "Incremental Sum (`incremental_sum`)"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/incremental_sum/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Incremental Sum (`incremental_sum`)
diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/web/api/queries/incremental_sum/incremental_sum.c
index afca530c3..88072f297 100644
--- a/web/api/queries/incremental_sum/incremental_sum.c
+++ b/web/api/queries/incremental_sum/incremental_sum.c
@@ -5,62 +5,3 @@
// ----------------------------------------------------------------------------
// incremental sum
-struct grouping_incremental_sum {
- NETDATA_DOUBLE first;
- NETDATA_DOUBLE last;
- size_t count;
-};
-
-void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused) {
- r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_incremental_sum(RRDR *r) {
- struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
- g->first = 0;
- g->last = 0;
- g->count = 0;
-}
-
-void grouping_free_incremental_sum(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
-
- if(unlikely(!g->count)) {
- g->first = value;
- g->count++;
- }
- else {
- g->last = value;
- g->count++;
- }
-}
-
-NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else if(unlikely(g->count == 1)) {
- value = 0.0;
- }
- else {
- value = g->last - g->first;
- }
-
- g->first = 0.0;
- g->last = 0.0;
- g->count = 0;
-
- return value;
-}
diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/web/api/queries/incremental_sum/incremental_sum.h
index c24507fcf..dd6483b2c 100644
--- a/web/api/queries/incremental_sum/incremental_sum.h
+++ b/web/api/queries/incremental_sum/incremental_sum.h
@@ -6,10 +6,64 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_incremental_sum(RRDR *r);
-void grouping_free_incremental_sum(RRDR *r);
-void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+struct tg_incremental_sum {
+ NETDATA_DOUBLE first;
+ NETDATA_DOUBLE last;
+ size_t count;
+};
+
+static inline void tg_incremental_sum_create(RRDR *r, const char *options __maybe_unused) {
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_incremental_sum));
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_incremental_sum_reset(RRDR *r) {
+ struct tg_incremental_sum *g = (struct tg_incremental_sum *)r->time_grouping.data;
+ g->first = 0;
+ g->last = 0;
+ g->count = 0;
+}
+
+static inline void tg_incremental_sum_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_incremental_sum_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_incremental_sum *g = (struct tg_incremental_sum *)r->time_grouping.data;
+
+ if(unlikely(!g->count)) {
+ g->first = value;
+ g->count++;
+ }
+ else {
+ g->last = value;
+ g->count++;
+ }
+}
+
+static inline NETDATA_DOUBLE tg_incremental_sum_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_incremental_sum *g = (struct tg_incremental_sum *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(unlikely(g->count == 1)) {
+ value = 0.0;
+ }
+ else {
+ value = g->last - g->first;
+ }
+
+ g->first = 0.0;
+ g->last = 0.0;
+ g->count = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H
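`incremental_sum` therefore reduces each group to `last - first`, returning 0 for single-sample groups. A minimal standalone sketch with made-up values:

```c
#include <stdio.h>
#include <stddef.h>

int main(void) {
    const double values[3] = { 100, 105, 112 };
    double first = 0.0, last = 0.0;
    size_t count = 0;

    for (size_t i = 0; i < 3; i++) {
        if (!count) first = values[i];
        else        last  = values[i];
        count++;
    }

    double result = (count < 2) ? 0.0 : last - first;
    printf("incremental_sum: %g\n", result); /* 12 */
    return 0;
}
```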
diff --git a/web/api/queries/max/README.md b/web/api/queries/max/README.md
index 48da7cf08..82749c4ab 100644
--- a/web/api/queries/max/README.md
+++ b/web/api/queries/max/README.md
@@ -1,6 +1,10 @@
<!--
title: "Max"
+sidebar_label: "Max"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/max/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Max
diff --git a/web/api/queries/max/max.c b/web/api/queries/max/max.c
index 73cf9fa66..cc5999a29 100644
--- a/web/api/queries/max/max.c
+++ b/web/api/queries/max/max.c
@@ -5,53 +5,3 @@
// ----------------------------------------------------------------------------
// max
-struct grouping_max {
- NETDATA_DOUBLE max;
- size_t count;
-};
-
-void grouping_create_max(RRDR *r, const char *options __maybe_unused) {
- r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_max(RRDR *r) {
- struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
- g->max = 0;
- g->count = 0;
-}
-
-void grouping_free_max(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
-
- if(!g->count || fabsndd(value) > fabsndd(g->max)) {
- g->max = value;
- g->count++;
- }
-}
-
-NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else {
- value = g->max;
- }
-
- g->max = 0.0;
- g->count = 0;
-
- return value;
-}
-
diff --git a/web/api/queries/max/max.h b/web/api/queries/max/max.h
index e2427d26d..c26bb79ad 100644
--- a/web/api/queries/max/max.h
+++ b/web/api/queries/max/max.h
@@ -6,10 +6,54 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_max(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_max(RRDR *r);
-void grouping_free_max(RRDR *r);
-void grouping_add_max(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+struct tg_max {
+ NETDATA_DOUBLE max;
+ size_t count;
+};
+
+static inline void tg_max_create(RRDR *r, const char *options __maybe_unused) {
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_max));
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_max_reset(RRDR *r) {
+ struct tg_max *g = (struct tg_max *)r->time_grouping.data;
+ g->max = 0;
+ g->count = 0;
+}
+
+static inline void tg_max_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_max_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_max *g = (struct tg_max *)r->time_grouping.data;
+
+ if(!g->count || fabsndd(value) > fabsndd(g->max)) {
+ g->max = value;
+ g->count++;
+ }
+}
+
+static inline NETDATA_DOUBLE tg_max_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_max *g = (struct tg_max *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->max;
+ }
+
+ g->max = 0.0;
+ g->count = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERY_MAX_H
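Note that the comparison in `tg_max_add()` is on absolute values (`fabsndd()`), so the signed value with the largest magnitude wins; `min` below is symmetric. A standalone sketch using plain `fabs()` with made-up data:

```c
#include <stdio.h>
#include <math.h>

int main(void) {
    const double values[3] = { -5, 3, 4 };
    double max = 0.0;
    int count = 0;

    for (int i = 0; i < 3; i++) {
        /* magnitude decides, but the signed value is kept */
        if (!count || fabs(values[i]) > fabs(max)) {
            max = values[i];
            count++;
        }
    }

    printf("max: %g\n", max); /* -5, not 4 */
    return 0;
}
```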
diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md
index 5600284c2..15549b3b5 100644
--- a/web/api/queries/median/README.md
+++ b/web/api/queries/median/README.md
@@ -1,7 +1,11 @@
<!--
title: "Median"
+sidebar_label: "Median"
description: "Use median in API queries and health entities to find the 'middle' value from a sample, eliminating any unwanted spikes in the returned metrics."
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/median/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Median
diff --git a/web/api/queries/median/median.c b/web/api/queries/median/median.c
index 40fd4ec3a..9865b485c 100644
--- a/web/api/queries/median/median.c
+++ b/web/api/queries/median/median.c
@@ -4,137 +4,3 @@
// ----------------------------------------------------------------------------
// median
-
-struct grouping_median {
- size_t series_size;
- size_t next_pos;
- NETDATA_DOUBLE percent;
-
- NETDATA_DOUBLE *series;
-};
-
-void grouping_create_median_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
- long entries = r->group;
- if(entries < 10) entries = 10;
-
- struct grouping_median *g = (struct grouping_median *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_median));
- g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
- g->series_size = (size_t)entries;
-
- g->percent = def;
- if(options && *options) {
- g->percent = str2ndd(options, NULL);
- if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
- if(g->percent < 0.0) g->percent = 0.0;
- if(g->percent > 50.0) g->percent = 50.0;
- }
-
- g->percent = g->percent / 100.0;
- r->internal.grouping_data = g;
-}
-
-void grouping_create_median(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 0.0);
-}
-void grouping_create_trimmed_median1(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 1.0);
-}
-void grouping_create_trimmed_median2(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 2.0);
-}
-void grouping_create_trimmed_median3(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 3.0);
-}
-void grouping_create_trimmed_median5(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 5.0);
-}
-void grouping_create_trimmed_median10(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 10.0);
-}
-void grouping_create_trimmed_median15(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 15.0);
-}
-void grouping_create_trimmed_median20(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 20.0);
-}
-void grouping_create_trimmed_median25(RRDR *r, const char *options) {
- grouping_create_median_internal(r, options, 25.0);
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_median(RRDR *r) {
- struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
- g->next_pos = 0;
-}
-
-void grouping_free_median(RRDR *r) {
- struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
- if(g) onewayalloc_freez(r->internal.owa, g->series);
-
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
-
- if(unlikely(g->next_pos >= g->series_size)) {
- g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
- g->series_size *= 2;
- }
-
- g->series[g->next_pos++] = value;
-}
-
-NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
-
- size_t available_slots = g->next_pos;
- NETDATA_DOUBLE value;
-
- if(unlikely(!available_slots)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else if(available_slots == 1) {
- value = g->series[0];
- }
- else {
- sort_series(g->series, available_slots);
-
- size_t start_slot = 0;
- size_t end_slot = available_slots - 1;
-
- if(g->percent > 0.0) {
- NETDATA_DOUBLE min = g->series[0];
- NETDATA_DOUBLE max = g->series[available_slots - 1];
- NETDATA_DOUBLE delta = (max - min) * g->percent;
-
- NETDATA_DOUBLE wanted_min = min + delta;
- NETDATA_DOUBLE wanted_max = max - delta;
-
- for (start_slot = 0; start_slot < available_slots; start_slot++)
- if (g->series[start_slot] >= wanted_min) break;
-
- for (end_slot = available_slots - 1; end_slot > start_slot; end_slot--)
- if (g->series[end_slot] <= wanted_max) break;
- }
-
- if(start_slot == end_slot)
- value = g->series[start_slot];
- else
- value = median_on_sorted_series(&g->series[start_slot], end_slot - start_slot + 1);
- }
-
- if(unlikely(!netdata_double_isnumber(value))) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
-
- //log_series_to_stderr(g->series, g->next_pos, value, "median");
-
- g->next_pos = 0;
-
- return value;
-}
diff --git a/web/api/queries/median/median.h b/web/api/queries/median/median.h
index 9fc159db4..3d6d35925 100644
--- a/web/api/queries/median/median.h
+++ b/web/api/queries/median/median.h
@@ -6,18 +6,138 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_median(RRDR *r, const char *options);
-void grouping_create_trimmed_median1(RRDR *r, const char *options);
-void grouping_create_trimmed_median2(RRDR *r, const char *options);
-void grouping_create_trimmed_median3(RRDR *r, const char *options);
-void grouping_create_trimmed_median5(RRDR *r, const char *options);
-void grouping_create_trimmed_median10(RRDR *r, const char *options);
-void grouping_create_trimmed_median15(RRDR *r, const char *options);
-void grouping_create_trimmed_median20(RRDR *r, const char *options);
-void grouping_create_trimmed_median25(RRDR *r, const char *options);
-void grouping_reset_median(RRDR *r);
-void grouping_free_median(RRDR *r);
-void grouping_add_median(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+struct tg_median {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static inline void tg_median_create_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->view.group;
+ if(entries < 10) entries = 10;
+
+ struct tg_median *g = (struct tg_median *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_median));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
+ g->percent = g->percent / 100.0;
+ r->time_grouping.data = g;
+}
+
+static inline void tg_median_create(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 0.0);
+}
+static inline void tg_median_create_trimmed_1(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 1.0);
+}
+static inline void tg_median_create_trimmed_2(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 2.0);
+}
+static inline void tg_median_create_trimmed_3(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 3.0);
+}
+static inline void tg_median_create_trimmed_5(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 5.0);
+}
+static inline void tg_median_create_trimmed_10(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 10.0);
+}
+static inline void tg_median_create_trimmed_15(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 15.0);
+}
+static inline void tg_median_create_trimmed_20(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 20.0);
+}
+static inline void tg_median_create_trimmed_25(RRDR *r, const char *options) {
+ tg_median_create_internal(r, options, 25.0);
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_median_reset(RRDR *r) {
+ struct tg_median *g = (struct tg_median *)r->time_grouping.data;
+ g->next_pos = 0;
+}
+
+static inline void tg_median_free(RRDR *r) {
+ struct tg_median *g = (struct tg_median *)r->time_grouping.data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_median_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_median *g = (struct tg_median *)r->time_grouping.data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+static inline NETDATA_DOUBLE tg_median_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_median *g = (struct tg_median *)r->time_grouping.data;
+
+ size_t available_slots = g->next_pos;
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ size_t start_slot = 0;
+ size_t end_slot = available_slots - 1;
+
+ if(g->percent > 0.0) {
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+ NETDATA_DOUBLE delta = (max - min) * g->percent;
+
+ NETDATA_DOUBLE wanted_min = min + delta;
+ NETDATA_DOUBLE wanted_max = max - delta;
+
+ for (start_slot = 0; start_slot < available_slots; start_slot++)
+ if (g->series[start_slot] >= wanted_min) break;
+
+ for (end_slot = available_slots - 1; end_slot > start_slot; end_slot--)
+ if (g->series[end_slot] <= wanted_max) break;
+ }
+
+ if(start_slot == end_slot)
+ value = g->series[start_slot];
+ else
+ value = median_on_sorted_series(&g->series[start_slot], end_slot - start_slot + 1);
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "median");
+
+ g->next_pos = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERIES_MEDIAN_H
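The trimming above is range-based: `delta` is a fraction of `max - min`, and only the slots inside `[min + delta, max - delta]` survive before the median is taken. A self-contained sketch of that flush path; averaging the two middle slots in the even case is an assumption about what `median_on_sorted_series()` does, and the data is made up:

```c
#include <stdio.h>
#include <stdlib.h>

static int cmp_double(const void *a, const void *b) {
    double x = *(const double *)a, y = *(const double *)b;
    return (x > y) - (x < y);
}

int main(void) {
    double s[10] = { 4, 1, 7, 2, 9, 5, 10, 3, 8, 6 };
    size_t n = 10;
    double percent = 0.25; /* trimmed-median25 => 25 / 100.0 */

    qsort(s, n, sizeof(double), cmp_double);

    double delta      = (s[n - 1] - s[0]) * percent; /* 2.25 */
    double wanted_min = s[0] + delta;                /* 3.25 */
    double wanted_max = s[n - 1] - delta;            /* 7.75 */

    size_t start = 0, end = n - 1;
    while (start < n - 1 && s[start] < wanted_min) start++;
    while (end > start && s[end] > wanted_max) end--;

    size_t k = end - start + 1;                      /* 4 slots: 4,5,6,7 */
    double median = (k % 2) ? s[start + k / 2]
                            : (s[start + k / 2 - 1] + s[start + k / 2]) / 2.0;
    printf("trimmed median: %g\n", median);          /* 5.5 */
    return 0;
}
```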
diff --git a/web/api/queries/min/README.md b/web/api/queries/min/README.md
index 495523c04..cf63aaa01 100644
--- a/web/api/queries/min/README.md
+++ b/web/api/queries/min/README.md
@@ -1,6 +1,10 @@
<!--
title: "Min"
+sidebar_label: "Min"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/min/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Min
diff --git a/web/api/queries/min/min.c b/web/api/queries/min/min.c
index 1752e9e0c..cefa7cf31 100644
--- a/web/api/queries/min/min.c
+++ b/web/api/queries/min/min.c
@@ -5,53 +5,3 @@
// ----------------------------------------------------------------------------
// min
-struct grouping_min {
- NETDATA_DOUBLE min;
- size_t count;
-};
-
-void grouping_create_min(RRDR *r, const char *options __maybe_unused) {
- r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_min(RRDR *r) {
- struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
- g->min = 0;
- g->count = 0;
-}
-
-void grouping_free_min(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
-
- if(!g->count || fabsndd(value) < fabsndd(g->min)) {
- g->min = value;
- g->count++;
- }
-}
-
-NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else {
- value = g->min;
- }
-
- g->min = 0.0;
- g->count = 0;
-
- return value;
-}
-
diff --git a/web/api/queries/min/min.h b/web/api/queries/min/min.h
index dcdfe252f..3c53dfd1d 100644
--- a/web/api/queries/min/min.h
+++ b/web/api/queries/min/min.h
@@ -6,10 +6,54 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_min(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_min(RRDR *r);
-void grouping_free_min(RRDR *r);
-void grouping_add_min(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+struct tg_min {
+ NETDATA_DOUBLE min;
+ size_t count;
+};
+
+static inline void tg_min_create(RRDR *r, const char *options __maybe_unused) {
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_min));
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+static inline void tg_min_reset(RRDR *r) {
+ struct tg_min *g = (struct tg_min *)r->time_grouping.data;
+ g->min = 0;
+ g->count = 0;
+}
+
+static inline void tg_min_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_min_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_min *g = (struct tg_min *)r->time_grouping.data;
+
+ if(!g->count || fabsndd(value) < fabsndd(g->min)) {
+ g->min = value;
+ g->count++;
+ }
+}
+
+static inline NETDATA_DOUBLE tg_min_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_min *g = (struct tg_min *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->min;
+ }
+
+ g->min = 0.0;
+ g->count = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERY_MIN_H
diff --git a/web/api/queries/percentile/README.md b/web/api/queries/percentile/README.md
index 70afc7420..19ec81ed6 100644
--- a/web/api/queries/percentile/README.md
+++ b/web/api/queries/percentile/README.md
@@ -1,7 +1,11 @@
<!--
title: "Percentile"
+sidebar_label: "Percentile"
description: "Use percentile in API queries and health entities to find the 'percentile' value from a sample, eliminating any unwanted spikes in the returned metrics."
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/percentile/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Percentile
diff --git a/web/api/queries/percentile/percentile.c b/web/api/queries/percentile/percentile.c
index 88f8600dd..da3b32696 100644
--- a/web/api/queries/percentile/percentile.c
+++ b/web/api/queries/percentile/percentile.c
@@ -4,166 +4,3 @@
// ----------------------------------------------------------------------------
// percentile
-
-struct grouping_percentile {
- size_t series_size;
- size_t next_pos;
- NETDATA_DOUBLE percent;
-
- NETDATA_DOUBLE *series;
-};
-
-static void grouping_create_percentile_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
- long entries = r->group;
- if(entries < 10) entries = 10;
-
- struct grouping_percentile *g = (struct grouping_percentile *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_percentile));
- g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
- g->series_size = (size_t)entries;
-
- g->percent = def;
- if(options && *options) {
- g->percent = str2ndd(options, NULL);
- if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
- if(g->percent < 0.0) g->percent = 0.0;
- if(g->percent > 100.0) g->percent = 100.0;
- }
-
- g->percent = g->percent / 100.0;
- r->internal.grouping_data = g;
-}
-
-void grouping_create_percentile25(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 25.0);
-}
-void grouping_create_percentile50(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 50.0);
-}
-void grouping_create_percentile75(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 75.0);
-}
-void grouping_create_percentile80(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 80.0);
-}
-void grouping_create_percentile90(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 90.0);
-}
-void grouping_create_percentile95(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 95.0);
-}
-void grouping_create_percentile97(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 97.0);
-}
-void grouping_create_percentile98(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 98.0);
-}
-void grouping_create_percentile99(RRDR *r, const char *options) {
- grouping_create_percentile_internal(r, options, 99.0);
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_percentile(RRDR *r) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
- g->next_pos = 0;
-}
-
-void grouping_free_percentile(RRDR *r) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
- if(g) onewayalloc_freez(r->internal.owa, g->series);
-
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
-
- if(unlikely(g->next_pos >= g->series_size)) {
- g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
- g->series_size *= 2;
- }
-
- g->series[g->next_pos++] = value;
-}
-
-NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
- size_t available_slots = g->next_pos;
-
- if(unlikely(!available_slots)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else if(available_slots == 1) {
- value = g->series[0];
- }
- else {
- sort_series(g->series, available_slots);
-
- NETDATA_DOUBLE min = g->series[0];
- NETDATA_DOUBLE max = g->series[available_slots - 1];
-
- if (min != max) {
- size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
- if(!slots_to_use) slots_to_use = 1;
-
- NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
- NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
-
- NETDATA_DOUBLE percent_interpolation_slot = 0.0;
- NETDATA_DOUBLE percent_last_slot = 0.0;
- if(percent_delta > 0.0) {
- NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
- NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
-
- percent_interpolation_slot = percent_delta / percent_1slot;
- percent_last_slot = 1 - percent_interpolation_slot;
- }
-
- int start_slot, stop_slot, step, last_slot, interpolation_slot;
- if(min >= 0.0 && max >= 0.0) {
- start_slot = 0;
- stop_slot = start_slot + (int)slots_to_use;
- last_slot = stop_slot - 1;
- interpolation_slot = stop_slot;
- step = 1;
- }
- else {
- start_slot = (int)available_slots - 1;
- stop_slot = start_slot - (int)slots_to_use;
- last_slot = stop_slot + 1;
- interpolation_slot = stop_slot;
- step = -1;
- }
-
- value = 0.0;
- for(int slot = start_slot; slot != stop_slot ; slot += step)
- value += g->series[slot];
-
- size_t counted = slots_to_use;
- if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
- value += g->series[interpolation_slot] * percent_interpolation_slot;
- value += g->series[last_slot] * percent_last_slot;
- counted++;
- }
-
- value = value / (NETDATA_DOUBLE)counted;
- }
- else
- value = min;
- }
-
- if(unlikely(!netdata_double_isnumber(value))) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
-
- //log_series_to_stderr(g->series, g->next_pos, value, "percentile");
-
- g->next_pos = 0;
-
- return value;
-}
diff --git a/web/api/queries/percentile/percentile.h b/web/api/queries/percentile/percentile.h
index 65e335c11..0532f9d3f 100644
--- a/web/api/queries/percentile/percentile.h
+++ b/web/api/queries/percentile/percentile.h
@@ -6,18 +6,167 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_percentile25(RRDR *r, const char *options);
-void grouping_create_percentile50(RRDR *r, const char *options);
-void grouping_create_percentile75(RRDR *r, const char *options);
-void grouping_create_percentile80(RRDR *r, const char *options);
-void grouping_create_percentile90(RRDR *r, const char *options);
-void grouping_create_percentile95(RRDR *r, const char *options);
-void grouping_create_percentile97(RRDR *r, const char *options);
-void grouping_create_percentile98(RRDR *r, const char *options);
-void grouping_create_percentile99(RRDR *r, const char *options );
-void grouping_reset_percentile(RRDR *r);
-void grouping_free_percentile(RRDR *r);
-void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+struct tg_percentile {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static inline void tg_percentile_create_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->view.group;
+ if(entries < 10) entries = 10;
+
+ struct tg_percentile *g = (struct tg_percentile *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_percentile));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 100.0) g->percent = 100.0;
+ }
+
+ g->percent = g->percent / 100.0;
+ r->time_grouping.data = g;
+}
+
+static inline void tg_percentile_create_25(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 25.0);
+}
+static inline void tg_percentile_create_50(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 50.0);
+}
+static inline void tg_percentile_create_75(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 75.0);
+}
+static inline void tg_percentile_create_80(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 80.0);
+}
+static inline void tg_percentile_create_90(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 90.0);
+}
+static inline void tg_percentile_create_95(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 95.0);
+}
+static inline void tg_percentile_create_97(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 97.0);
+}
+static inline void tg_percentile_create_98(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 98.0);
+}
+static inline void tg_percentile_create_99(RRDR *r, const char *options) {
+ tg_percentile_create_internal(r, options, 99.0);
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+static inline void tg_percentile_reset(RRDR *r) {
+ struct tg_percentile *g = (struct tg_percentile *)r->time_grouping.data;
+ g->next_pos = 0;
+}
+
+static inline void tg_percentile_free(RRDR *r) {
+ struct tg_percentile *g = (struct tg_percentile *)r->time_grouping.data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_percentile_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_percentile *g = (struct tg_percentile *)r->time_grouping.data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+static inline NETDATA_DOUBLE tg_percentile_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_percentile *g = (struct tg_percentile *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = 0;
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1;
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "percentile");
+
+ g->next_pos = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERIES_PERCENTILE_H
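tg_percentile_flush sorts the collected samples and averages the lowest percent share of slots (or the highest-magnitude ones when the series is negative), linearly interpolating one extra slot when percent * n is not an integer. For intuition only, here is a far simpler nearest-rank percentile in standalone C; it is not the interpolating, sign-aware average implemented above.

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Nearest-rank percentile: a deliberately simpler sketch than the
 * interpolating, sign-aware average implemented in tg_percentile_flush. */
static int cmp_double(const void *a, const void *b) {
    double x = *(const double *)a, y = *(const double *)b;
    return (x > y) - (x < y);
}

static double percentile_nearest_rank(double *series, size_t n, double percent) {
    if (!n) return 0.0;                       /* caller should flag the point empty */
    qsort(series, n, sizeof(double), cmp_double);
    size_t rank = (size_t)ceil((percent / 100.0) * (double)n);
    if (rank) rank--;                         /* ceil(p*n) is a 1-based rank */
    return series[rank];
}

int main(void) {
    double s[] = { 9, 1, 8, 2, 7, 3, 6, 4, 5, 10 };
    printf("p95 = %g\n", percentile_nearest_rank(s, 10, 95.0)); /* prints 10 */
    return 0;
}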
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
index df7e09799..3770d4770 100644
--- a/web/api/queries/query.c
+++ b/web/api/queries/query.c
@@ -24,7 +24,8 @@
static struct {
const char *name;
uint32_t hash;
- RRDR_GROUPING value;
+ RRDR_TIME_GROUPING value;
+ RRDR_TIME_GROUPING add_flush;
// One time initialization for the module.
// This is called once, when netdata starts.
@@ -59,397 +60,445 @@ static struct {
{.name = "average",
.hash = 0,
.value = RRDR_GROUPING_AVERAGE,
+ .add_flush = RRDR_GROUPING_AVERAGE,
.init = NULL,
- .create= grouping_create_average,
- .reset = grouping_reset_average,
- .free = grouping_free_average,
- .add = grouping_add_average,
- .flush = grouping_flush_average,
+ .create= tg_average_create,
+ .reset = tg_average_reset,
+ .free = tg_average_free,
+ .add = tg_average_add,
+ .flush = tg_average_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
- {.name = "mean", // alias on 'average'
+ {.name = "avg", // alias on 'average'
.hash = 0,
.value = RRDR_GROUPING_AVERAGE,
+ .add_flush = RRDR_GROUPING_AVERAGE,
.init = NULL,
- .create= grouping_create_average,
- .reset = grouping_reset_average,
- .free = grouping_free_average,
- .add = grouping_add_average,
- .flush = grouping_flush_average,
+ .create= tg_average_create,
+ .reset = tg_average_reset,
+ .free = tg_average_free,
+ .add = tg_average_add,
+ .flush = tg_average_flush,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "mean", // alias on 'average'
+ .hash = 0,
+ .value = RRDR_GROUPING_AVERAGE,
+ .add_flush = RRDR_GROUPING_AVERAGE,
+ .init = NULL,
+ .create= tg_average_create,
+ .reset = tg_average_reset,
+ .free = tg_average_free,
+ .add = tg_average_add,
+ .flush = tg_average_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean1",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEAN1,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean1,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_1,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean2",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEAN2,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean2,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_2,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean3",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEAN3,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean3,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_3,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean5",
.hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .value = RRDR_GROUPING_TRIMMED_MEAN,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean5,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_5,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean10",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEAN10,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean10,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_10,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean15",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEAN15,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean15,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_15,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean20",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEAN20,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean20,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_20,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean25",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEAN25,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean25,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_25,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-mean",
.hash = 0,
- .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .value = RRDR_GROUPING_TRIMMED_MEAN,
+ .add_flush = RRDR_GROUPING_TRIMMED_MEAN,
.init = NULL,
- .create= grouping_create_trimmed_mean5,
- .reset = grouping_reset_trimmed_mean,
- .free = grouping_free_trimmed_mean,
- .add = grouping_add_trimmed_mean,
- .flush = grouping_flush_trimmed_mean,
+ .create= tg_trimmed_mean_create_5,
+ .reset = tg_trimmed_mean_reset,
+ .free = tg_trimmed_mean_free,
+ .add = tg_trimmed_mean_add,
+ .flush = tg_trimmed_mean_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "incremental_sum",
.hash = 0,
.value = RRDR_GROUPING_INCREMENTAL_SUM,
+ .add_flush = RRDR_GROUPING_INCREMENTAL_SUM,
.init = NULL,
- .create= grouping_create_incremental_sum,
- .reset = grouping_reset_incremental_sum,
- .free = grouping_free_incremental_sum,
- .add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum,
+ .create= tg_incremental_sum_create,
+ .reset = tg_incremental_sum_reset,
+ .free = tg_incremental_sum_free,
+ .add = tg_incremental_sum_add,
+ .flush = tg_incremental_sum_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "incremental-sum",
.hash = 0,
.value = RRDR_GROUPING_INCREMENTAL_SUM,
+ .add_flush = RRDR_GROUPING_INCREMENTAL_SUM,
.init = NULL,
- .create= grouping_create_incremental_sum,
- .reset = grouping_reset_incremental_sum,
- .free = grouping_free_incremental_sum,
- .add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum,
+ .create= tg_incremental_sum_create,
+ .reset = tg_incremental_sum_reset,
+ .free = tg_incremental_sum_free,
+ .add = tg_incremental_sum_add,
+ .flush = tg_incremental_sum_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "median",
.hash = 0,
.value = RRDR_GROUPING_MEDIAN,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_median,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median1",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN1,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median1,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_1,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median2",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN2,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median2,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_2,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median3",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN3,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median3,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_3,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median5",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median5,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_5,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median10",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN10,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median10,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_10,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median15",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN15,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median15,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_15,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median20",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN20,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median20,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_20,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median25",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN25,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median25,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_25,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "trimmed-median",
.hash = 0,
.value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .add_flush = RRDR_GROUPING_MEDIAN,
.init = NULL,
- .create= grouping_create_trimmed_median5,
- .reset = grouping_reset_median,
- .free = grouping_free_median,
- .add = grouping_add_median,
- .flush = grouping_flush_median,
+ .create= tg_median_create_trimmed_5,
+ .reset = tg_median_reset,
+ .free = tg_median_free,
+ .add = tg_median_add,
+ .flush = tg_median_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile25",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE25,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile25,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_25,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile50",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE50,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile50,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_50,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile75",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE75,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile75,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_75,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile80",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE80,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile80,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_80,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile90",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE90,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile90,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_90,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile95",
.hash = 0,
- .value = RRDR_GROUPING_PERCENTILE95,
+ .value = RRDR_GROUPING_PERCENTILE,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile95,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_95,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile97",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE97,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile97,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_97,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile98",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE98,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile98,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_98,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile99",
.hash = 0,
.value = RRDR_GROUPING_PERCENTILE99,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile99,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_99,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "percentile",
.hash = 0,
- .value = RRDR_GROUPING_PERCENTILE95,
+ .value = RRDR_GROUPING_PERCENTILE,
+ .add_flush = RRDR_GROUPING_PERCENTILE,
.init = NULL,
- .create= grouping_create_percentile95,
- .reset = grouping_reset_percentile,
- .free = grouping_free_percentile,
- .add = grouping_add_percentile,
- .flush = grouping_flush_percentile,
+ .create= tg_percentile_create_95,
+ .reset = tg_percentile_reset,
+ .free = tg_percentile_free,
+ .add = tg_percentile_add,
+ .flush = tg_percentile_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "min",
.hash = 0,
.value = RRDR_GROUPING_MIN,
+ .add_flush = RRDR_GROUPING_MIN,
.init = NULL,
- .create= grouping_create_min,
- .reset = grouping_reset_min,
- .free = grouping_free_min,
- .add = grouping_add_min,
- .flush = grouping_flush_min,
+ .create= tg_min_create,
+ .reset = tg_min_reset,
+ .free = tg_min_free,
+ .add = tg_min_add,
+ .flush = tg_min_flush,
.tier_query_fetch = TIER_QUERY_FETCH_MIN
},
{.name = "max",
.hash = 0,
.value = RRDR_GROUPING_MAX,
+ .add_flush = RRDR_GROUPING_MAX,
.init = NULL,
- .create= grouping_create_max,
- .reset = grouping_reset_max,
- .free = grouping_free_max,
- .add = grouping_add_max,
- .flush = grouping_flush_max,
+ .create= tg_max_create,
+ .reset = tg_max_reset,
+ .free = tg_max_free,
+ .add = tg_max_add,
+ .flush = tg_max_flush,
.tier_query_fetch = TIER_QUERY_FETCH_MAX
},
{.name = "sum",
.hash = 0,
.value = RRDR_GROUPING_SUM,
+ .add_flush = RRDR_GROUPING_SUM,
.init = NULL,
- .create= grouping_create_sum,
- .reset = grouping_reset_sum,
- .free = grouping_free_sum,
- .add = grouping_add_sum,
- .flush = grouping_flush_sum,
+ .create= tg_sum_create,
+ .reset = tg_sum_reset,
+ .free = tg_sum_free,
+ .add = tg_sum_add,
+ .flush = tg_sum_flush,
.tier_query_fetch = TIER_QUERY_FETCH_SUM
},
@@ -457,97 +506,75 @@ static struct {
{.name = "stddev",
.hash = 0,
.value = RRDR_GROUPING_STDDEV,
+ .add_flush = RRDR_GROUPING_STDDEV,
.init = NULL,
- .create= grouping_create_stddev,
- .reset = grouping_reset_stddev,
- .free = grouping_free_stddev,
- .add = grouping_add_stddev,
- .flush = grouping_flush_stddev,
+ .create= tg_stddev_create,
+ .reset = tg_stddev_reset,
+ .free = tg_stddev_free,
+ .add = tg_stddev_add,
+ .flush = tg_stddev_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "cv", // coefficient variation is calculated by stddev
.hash = 0,
.value = RRDR_GROUPING_CV,
+ .add_flush = RRDR_GROUPING_CV,
.init = NULL,
- .create= grouping_create_stddev, // not an error, stddev calculates this too
- .reset = grouping_reset_stddev, // not an error, stddev calculates this too
- .free = grouping_free_stddev, // not an error, stddev calculates this too
- .add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation,
+ .create= tg_stddev_create, // not an error, stddev calculates this too
+ .reset = tg_stddev_reset, // not an error, stddev calculates this too
+ .free = tg_stddev_free, // not an error, stddev calculates this too
+ .add = tg_stddev_add, // not an error, stddev calculates this too
+ .flush = tg_stddev_coefficient_of_variation_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "rsd", // alias of 'cv'
.hash = 0,
.value = RRDR_GROUPING_CV,
+ .add_flush = RRDR_GROUPING_CV,
.init = NULL,
- .create= grouping_create_stddev, // not an error, stddev calculates this too
- .reset = grouping_reset_stddev, // not an error, stddev calculates this too
- .free = grouping_free_stddev, // not an error, stddev calculates this too
- .add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
-
- /*
- {.name = "mean", // same as average, no need to define it again
- .hash = 0,
- .value = RRDR_GROUPING_MEAN,
- .setup = NULL,
- .create= grouping_create_stddev,
- .reset = grouping_reset_stddev,
- .free = grouping_free_stddev,
- .add = grouping_add_stddev,
- .flush = grouping_flush_mean,
+ .create= tg_stddev_create, // not an error, stddev calculates this too
+ .reset = tg_stddev_reset, // not an error, stddev calculates this too
+ .free = tg_stddev_free, // not an error, stddev calculates this too
+ .add = tg_stddev_add, // not an error, stddev calculates this too
+ .flush = tg_stddev_coefficient_of_variation_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
- */
-
- /*
- {.name = "variance", // meaningless to offer
- .hash = 0,
- .value = RRDR_GROUPING_VARIANCE,
- .setup = NULL,
- .create= grouping_create_stddev,
- .reset = grouping_reset_stddev,
- .free = grouping_free_stddev,
- .add = grouping_add_stddev,
- .flush = grouping_flush_variance,
- .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
- },
- */
// single exponential smoothing
{.name = "ses",
.hash = 0,
.value = RRDR_GROUPING_SES,
- .init = grouping_init_ses,
- .create= grouping_create_ses,
- .reset = grouping_reset_ses,
- .free = grouping_free_ses,
- .add = grouping_add_ses,
- .flush = grouping_flush_ses,
+ .add_flush = RRDR_GROUPING_SES,
+ .init = tg_ses_init,
+ .create= tg_ses_create,
+ .reset = tg_ses_reset,
+ .free = tg_ses_free,
+ .add = tg_ses_add,
+ .flush = tg_ses_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "ema", // alias for 'ses'
.hash = 0,
.value = RRDR_GROUPING_SES,
+ .add_flush = RRDR_GROUPING_SES,
.init = NULL,
- .create= grouping_create_ses,
- .reset = grouping_reset_ses,
- .free = grouping_free_ses,
- .add = grouping_add_ses,
- .flush = grouping_flush_ses,
+ .create= tg_ses_create,
+ .reset = tg_ses_reset,
+ .free = tg_ses_free,
+ .add = tg_ses_add,
+ .flush = tg_ses_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "ewma", // alias for ses
.hash = 0,
.value = RRDR_GROUPING_SES,
+ .add_flush = RRDR_GROUPING_SES,
.init = NULL,
- .create= grouping_create_ses,
- .reset = grouping_reset_ses,
- .free = grouping_free_ses,
- .add = grouping_add_ses,
- .flush = grouping_flush_ses,
+ .create= tg_ses_create,
+ .reset = tg_ses_reset,
+ .free = tg_ses_free,
+ .add = tg_ses_add,
+ .flush = tg_ses_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
@@ -555,24 +582,26 @@ static struct {
{.name = "des",
.hash = 0,
.value = RRDR_GROUPING_DES,
- .init = grouping_init_des,
- .create= grouping_create_des,
- .reset = grouping_reset_des,
- .free = grouping_free_des,
- .add = grouping_add_des,
- .flush = grouping_flush_des,
+ .add_flush = RRDR_GROUPING_DES,
+ .init = tg_des_init,
+ .create= tg_des_create,
+ .reset = tg_des_reset,
+ .free = tg_des_free,
+ .add = tg_des_add,
+ .flush = tg_des_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "countif",
.hash = 0,
.value = RRDR_GROUPING_COUNTIF,
+ .add_flush = RRDR_GROUPING_COUNTIF,
.init = NULL,
- .create= grouping_create_countif,
- .reset = grouping_reset_countif,
- .free = grouping_free_countif,
- .add = grouping_add_countif,
- .flush = grouping_flush_countif,
+ .create= tg_countif_create,
+ .reset = tg_countif_reset,
+ .free = tg_countif_free,
+ .add = tg_countif_add,
+ .flush = tg_countif_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
@@ -580,17 +609,18 @@ static struct {
{.name = NULL,
.hash = 0,
.value = RRDR_GROUPING_UNDEFINED,
+ .add_flush = RRDR_GROUPING_AVERAGE,
.init = NULL,
- .create= grouping_create_average,
- .reset = grouping_reset_average,
- .free = grouping_free_average,
- .add = grouping_add_average,
- .flush = grouping_flush_average,
+ .create= tg_average_create,
+ .reset = tg_average_reset,
+ .free = tg_average_free,
+ .add = tg_average_add,
+ .flush = tg_average_flush,
.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
}
};
-void web_client_api_v1_init_grouping(void) {
+void time_grouping_init(void) {
int i;
for(i = 0; api_v1_data_groups[i].name ; i++) {
@@ -601,7 +631,7 @@ void web_client_api_v1_init_grouping(void) {
}
}
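The init pass above fills in the .hash field of every table entry, so time_grouping_parse can reject most names with a single integer comparison before confirming a match with strcmp. A standalone sketch of that lookup pattern follows; djb2 stands in for netdata's simple_hash, so the hash values are illustrative, not the ones netdata computes.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the init-once hash lookup pattern: djb2 stands in for
 * netdata's simple_hash(); only the technique is illustrated. */
static uint32_t djb2(const char *s) {
    uint32_t h = 5381;
    while (*s) h = h * 33 + (uint8_t)*s++;
    return h;
}

struct method { const char *name; uint32_t hash; int value; };

static struct method methods[] = {
    { "average", 0, 1 },
    { "min",     0, 2 },
    { "max",     0, 3 },
    { NULL,      0, 0 },
};

static void methods_init(void) {             /* one-time, like time_grouping_init() */
    for (int i = 0; methods[i].name; i++)
        methods[i].hash = djb2(methods[i].name);
}

static int methods_parse(const char *name, int def) {
    uint32_t hash = djb2(name);
    for (int i = 0; methods[i].name; i++)
        if (hash == methods[i].hash && strcmp(name, methods[i].name) == 0)
            return methods[i].value;
    return def;                              /* unknown names fall back to the default */
}

int main(void) {
    methods_init();
    printf("%d %d\n", methods_parse("min", 1), methods_parse("bogus", 1)); /* 2 1 */
    return 0;
}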
-const char *group_method2string(RRDR_GROUPING group) {
+const char *time_grouping_method2string(RRDR_TIME_GROUPING group) {
int i;
for(i = 0; api_v1_data_groups[i].name ; i++) {
@@ -613,7 +643,7 @@ const char *group_method2string(RRDR_GROUPING group) {
return "unknown-group-method";
}
-RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def) {
+RRDR_TIME_GROUPING time_grouping_parse(const char *name, RRDR_TIME_GROUPING def) {
int i;
uint32_t hash = simple_hash(name);
@@ -624,7 +654,7 @@ RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPI
return def;
}
-const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group) {
+const char *time_grouping_tostring(RRDR_TIME_GROUPING group) {
int i;
for(i = 0; api_v1_data_groups[i].name ; i++)
@@ -634,28 +664,242 @@ const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group)
return "unknown";
}
-static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
+static void rrdr_set_grouping_function(RRDR *r, RRDR_TIME_GROUPING group_method) {
int i, found = 0;
for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
if(api_v1_data_groups[i].value == group_method) {
- r->internal.grouping_create = api_v1_data_groups[i].create;
- r->internal.grouping_reset = api_v1_data_groups[i].reset;
- r->internal.grouping_free = api_v1_data_groups[i].free;
- r->internal.grouping_add = api_v1_data_groups[i].add;
- r->internal.grouping_flush = api_v1_data_groups[i].flush;
- r->internal.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
+ r->time_grouping.create = api_v1_data_groups[i].create;
+ r->time_grouping.reset = api_v1_data_groups[i].reset;
+ r->time_grouping.free = api_v1_data_groups[i].free;
+ r->time_grouping.add = api_v1_data_groups[i].add;
+ r->time_grouping.flush = api_v1_data_groups[i].flush;
+ r->time_grouping.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
+ r->time_grouping.add_flush = api_v1_data_groups[i].add_flush;
found = 1;
}
}
if(!found) {
errno = 0;
internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
- r->internal.grouping_create = grouping_create_average;
- r->internal.grouping_reset = grouping_reset_average;
- r->internal.grouping_free = grouping_free_average;
- r->internal.grouping_add = grouping_add_average;
- r->internal.grouping_flush = grouping_flush_average;
- r->internal.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
+ r->time_grouping.create = tg_average_create;
+ r->time_grouping.reset = tg_average_reset;
+ r->time_grouping.free = tg_average_free;
+ r->time_grouping.add = tg_average_add;
+ r->time_grouping.flush = tg_average_flush;
+ r->time_grouping.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
+ r->time_grouping.add_flush = RRDR_GROUPING_AVERAGE;
+ }
+}
+
+static inline void time_grouping_add(RRDR *r, NETDATA_DOUBLE value, const RRDR_TIME_GROUPING add_flush) {
+ switch(add_flush) {
+ case RRDR_GROUPING_AVERAGE:
+ tg_average_add(r, value);
+ break;
+
+ case RRDR_GROUPING_MAX:
+ tg_max_add(r, value);
+ break;
+
+ case RRDR_GROUPING_MIN:
+ tg_min_add(r, value);
+ break;
+
+ case RRDR_GROUPING_MEDIAN:
+ tg_median_add(r, value);
+ break;
+
+ case RRDR_GROUPING_STDDEV:
+ case RRDR_GROUPING_CV:
+ tg_stddev_add(r, value);
+ break;
+
+ case RRDR_GROUPING_SUM:
+ tg_sum_add(r, value);
+ break;
+
+ case RRDR_GROUPING_COUNTIF:
+ tg_countif_add(r, value);
+ break;
+
+ case RRDR_GROUPING_TRIMMED_MEAN:
+ tg_trimmed_mean_add(r, value);
+ break;
+
+ case RRDR_GROUPING_PERCENTILE:
+ tg_percentile_add(r, value);
+ break;
+
+ case RRDR_GROUPING_SES:
+ tg_ses_add(r, value);
+ break;
+
+ case RRDR_GROUPING_DES:
+ tg_des_add(r, value);
+ break;
+
+ case RRDR_GROUPING_INCREMENTAL_SUM:
+ tg_incremental_sum_add(r, value);
+ break;
+
+ default:
+ r->time_grouping.add(r, value);
+ break;
+ }
+}
+
+static inline NETDATA_DOUBLE time_grouping_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr, const RRDR_TIME_GROUPING add_flush) {
+ switch(add_flush) {
+ case RRDR_GROUPING_AVERAGE:
+ return tg_average_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_MAX:
+ return tg_max_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_MIN:
+ return tg_min_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_MEDIAN:
+ return tg_median_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_STDDEV:
+ return tg_stddev_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_CV:
+ return tg_stddev_coefficient_of_variation_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_SUM:
+ return tg_sum_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_COUNTIF:
+ return tg_countif_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_TRIMMED_MEAN:
+ return tg_trimmed_mean_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_PERCENTILE:
+ return tg_percentile_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_SES:
+ return tg_ses_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_DES:
+ return tg_des_flush(r, rrdr_value_options_ptr);
+
+ case RRDR_GROUPING_INCREMENTAL_SUM:
+ return tg_incremental_sum_flush(r, rrdr_value_options_ptr);
+
+ default:
+ return r->time_grouping.flush(r, rrdr_value_options_ptr);
+ }
+}
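The new add_flush field is why these switch statements exist: with every tg_* implementation now a static inline in its header, dispatching through a switch over the canonical method enum lets the compiler inline the hot add/flush bodies, which the old per-RRDR function pointers prevented; aliases such as 'cv'/'rsd' share one case, and the function pointers remain only as the default fallback. A minimal sketch of the pattern, under the assumption that inlining the accumulator bodies is the goal:

#include <stdio.h>

/* Sketch of the devirtualization: a switch over a small enum lets the
 * compiler inline the per-method add/flush bodies, which an indirect
 * call through a function pointer would prevent. */
enum tg { TG_AVERAGE, TG_MAX };

struct acc { double sum, max; size_t count; };

static inline void add_value(struct acc *a, double v, enum tg method) {
    switch (method) {
        case TG_MAX:
            if (!a->count || v > a->max) a->max = v;
            a->count++;
            break;
        default: /* TG_AVERAGE */
            a->sum += v;
            a->count++;
            break;
    }
}

static inline double flush_value(struct acc *a, enum tg method) {
    double v = (method == TG_MAX) ? a->max
             : (a->count ? a->sum / (double)a->count : 0.0);
    *a = (struct acc){ 0 };      /* reset for the next group, like tg_*_flush */
    return v;
}

int main(void) {
    struct acc a = { 0 };
    add_value(&a, 1, TG_AVERAGE);
    add_value(&a, 3, TG_AVERAGE);
    printf("%g\n", flush_value(&a, TG_AVERAGE)); /* prints 2 */
    return 0;
}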
+
+RRDR_GROUP_BY group_by_parse(char *s) {
+ RRDR_GROUP_BY group_by = RRDR_GROUP_BY_NONE;
+
+ while(s) {
+ char *key = strsep_skip_consecutive_separators(&s, ",| ");
+ if (!key || !*key) continue;
+
+ if (strcmp(key, "selected") == 0)
+ group_by |= RRDR_GROUP_BY_SELECTED;
+
+ if (strcmp(key, "dimension") == 0)
+ group_by |= RRDR_GROUP_BY_DIMENSION;
+
+ if (strcmp(key, "instance") == 0)
+ group_by |= RRDR_GROUP_BY_INSTANCE;
+
+ if (strcmp(key, "percentage-of-instance") == 0)
+ group_by |= RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE;
+
+ if (strcmp(key, "label") == 0)
+ group_by |= RRDR_GROUP_BY_LABEL;
+
+ if (strcmp(key, "node") == 0)
+ group_by |= RRDR_GROUP_BY_NODE;
+
+ if (strcmp(key, "context") == 0)
+ group_by |= RRDR_GROUP_BY_CONTEXT;
+
+ if (strcmp(key, "units") == 0)
+ group_by |= RRDR_GROUP_BY_UNITS;
+ }
+
+ if((group_by & RRDR_GROUP_BY_SELECTED) && (group_by & ~RRDR_GROUP_BY_SELECTED)) {
+ internal_error(true, "group-by given by query has 'selected' together with more groupings");
+ group_by = RRDR_GROUP_BY_SELECTED; // remove all other groupings
+ }
+
+ if(group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ group_by = RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE; // remove all other groupings
+
+ return group_by;
+}
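group_by_parse folds a ','-, '|'- or space-separated token list into a bitmask and then applies two precedence rules: 'selected' discards every other grouping, and 'percentage-of-instance' does the same. A standalone stand-in follows; strtok replaces netdata's strsep_skip_consecutive_separators and only a subset of the flags is modeled.

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for group_by_parse(): strtok() replaces netdata's
 * strsep_skip_consecutive_separators(); only the bitmask logic is shown. */
enum {
    GB_NONE = 0, GB_SELECTED = 1 << 0, GB_DIMENSION = 1 << 1,
    GB_INSTANCE = 1 << 2, GB_LABEL = 1 << 3, GB_NODE = 1 << 4,
};

static unsigned parse_group_by(char *s) {
    unsigned gb = GB_NONE;
    for (char *key = strtok(s, ",| "); key; key = strtok(NULL, ",| ")) {
        if (!strcmp(key, "selected"))  gb |= GB_SELECTED;
        if (!strcmp(key, "dimension")) gb |= GB_DIMENSION;
        if (!strcmp(key, "instance"))  gb |= GB_INSTANCE;
        if (!strcmp(key, "label"))     gb |= GB_LABEL;
        if (!strcmp(key, "node"))      gb |= GB_NODE;
    }
    if ((gb & GB_SELECTED) && (gb & ~GB_SELECTED))
        gb = GB_SELECTED;          /* 'selected' overrides all other groupings */
    return gb;
}

int main(void) {
    char a[] = "label|node", b[] = "selected,node";
    printf("%u %u\n", parse_group_by(a), parse_group_by(b)); /* prints 24 1 */
    return 0;
}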
+
+void buffer_json_group_by_to_array(BUFFER *wb, RRDR_GROUP_BY group_by) {
+ if(group_by == RRDR_GROUP_BY_NONE)
+ buffer_json_add_array_item_string(wb, "none");
+ else {
+ if (group_by & RRDR_GROUP_BY_DIMENSION)
+ buffer_json_add_array_item_string(wb, "dimension");
+
+ if (group_by & RRDR_GROUP_BY_INSTANCE)
+ buffer_json_add_array_item_string(wb, "instance");
+
+ if (group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ buffer_json_add_array_item_string(wb, "percentage-of-instance");
+
+ if (group_by & RRDR_GROUP_BY_LABEL)
+ buffer_json_add_array_item_string(wb, "label");
+
+ if (group_by & RRDR_GROUP_BY_NODE)
+ buffer_json_add_array_item_string(wb, "node");
+
+ if (group_by & RRDR_GROUP_BY_CONTEXT)
+ buffer_json_add_array_item_string(wb, "context");
+
+ if (group_by & RRDR_GROUP_BY_UNITS)
+ buffer_json_add_array_item_string(wb, "units");
+
+ if (group_by & RRDR_GROUP_BY_SELECTED)
+ buffer_json_add_array_item_string(wb, "selected");
+ }
+}
+
+RRDR_GROUP_BY_FUNCTION group_by_aggregate_function_parse(const char *s) {
+ if(strcmp(s, "average") == 0)
+ return RRDR_GROUP_BY_FUNCTION_AVERAGE;
+
+ if(strcmp(s, "avg") == 0)
+ return RRDR_GROUP_BY_FUNCTION_AVERAGE;
+
+ if(strcmp(s, "min") == 0)
+ return RRDR_GROUP_BY_FUNCTION_MIN;
+
+ if(strcmp(s, "max") == 0)
+ return RRDR_GROUP_BY_FUNCTION_MAX;
+
+ if(strcmp(s, "sum") == 0)
+ return RRDR_GROUP_BY_FUNCTION_SUM;
+
+ return RRDR_GROUP_BY_FUNCTION_AVERAGE;
+}
+
+const char *group_by_aggregate_function_to_string(RRDR_GROUP_BY_FUNCTION group_by_function) {
+ switch(group_by_function) {
+ default:
+ case RRDR_GROUP_BY_FUNCTION_AVERAGE:
+ return "average";
+
+ case RRDR_GROUP_BY_FUNCTION_MIN:
+ return "min";
+
+ case RRDR_GROUP_BY_FUNCTION_MAX:
+ return "max";
+
+ case RRDR_GROUP_BY_FUNCTION_SUM:
+ return "sum";
}
}
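The aggregation-function parser is forgiving: "avg" is accepted as an alias of "average" and anything unrecognized falls back to the average, so to_string(parse(s)) canonicalizes user input. A tiny stand-in for the pair:

#include <stdio.h>
#include <string.h>

/* Stand-in for the parse/to_string pair above: unknown names fall back
 * to "average", so the round trip canonicalizes user input. */
enum gbf { GBF_AVERAGE, GBF_MIN, GBF_MAX, GBF_SUM };

static enum gbf gbf_parse(const char *s) {
    if (!strcmp(s, "min")) return GBF_MIN;
    if (!strcmp(s, "max")) return GBF_MAX;
    if (!strcmp(s, "sum")) return GBF_SUM;
    return GBF_AVERAGE;             /* covers "average", "avg", anything else */
}

static const char *gbf_to_string(enum gbf f) {
    switch (f) {
        case GBF_MIN: return "min";
        case GBF_MAX: return "max";
        case GBF_SUM: return "sum";
        default:      return "average";
    }
}

int main(void) {
    printf("%s\n", gbf_to_string(gbf_parse("avg")));    /* average */
    printf("%s\n", gbf_to_string(gbf_parse("bogus")));  /* average */
    return 0;
}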
@@ -670,28 +914,20 @@ static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rr
return &r->v[ rrdr_line * r->d ];
}
-static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
+static inline long rrdr_line_init(RRDR *r __maybe_unused, time_t t __maybe_unused, long rrdr_line) {
rrdr_line++;
- internal_error(rrdr_line >= (long)r->n,
+ internal_fatal(rrdr_line >= (long)r->n,
"QUERY: requested to step above RRDR size for query '%s'",
r->internal.qt->id);
- internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
- "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of query '%s'",
- (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->internal.qt->id);
-
- // save the time
- r->t[rrdr_line] = t;
+ internal_fatal(r->t[rrdr_line] != t,
+ "QUERY: wrong timestamp at RRDR line %ld, expected %ld, got %ld, of query '%s'",
+ rrdr_line, r->t[rrdr_line], t, r->internal.qt->id);
return rrdr_line;
}
-static inline void rrdr_done(RRDR *r, long rrdr_line) {
- r->rows = rrdr_line + 1;
-}
-
-
// ----------------------------------------------------------------------------
// tier management
@@ -822,7 +1058,7 @@ static size_t rrddim_find_best_tier_for_timeframe(QUERY_TARGET *qt, time_t after
// find the db time-range for this tier for all metrics
for(size_t i = 0, used = qt->query.used; i < used ; i++) {
- QUERY_METRIC *qm = &qt->query.array[i];
+ QUERY_METRIC *qm = query_metric(qt, i);
time_t first_time_s = qm->tiers[tier].db_first_time_s;
time_t last_time_s = qm->tiers[tier].db_last_time_s;
@@ -872,7 +1108,7 @@ static time_t rrdset_find_natural_update_every_for_timeframe(QUERY_TARGET *qt, t
// find the db minimum update every for this tier for all metrics
time_t common_update_every_s = default_rrd_update_every;
for(size_t i = 0, used = qt->query.used; i < used ; i++) {
- QUERY_METRIC *qm = &qt->query.array[i];
+ QUERY_METRIC *qm = query_metric(qt, i);
time_t update_every_s = qm->tiers[best_tier].db_update_every_s;
@@ -889,24 +1125,20 @@ static time_t rrdset_find_natural_update_every_for_timeframe(QUERY_TARGET *qt, t
// query ops
typedef struct query_point {
- time_t end_time;
- time_t start_time;
+ STORAGE_POINT sp;
NETDATA_DOUBLE value;
- NETDATA_DOUBLE anomaly;
- SN_FLAGS flags;
+ bool added;
#ifdef NETDATA_INTERNAL_CHECKS
size_t id;
#endif
} QUERY_POINT;
QUERY_POINT QUERY_POINT_EMPTY = {
- .end_time = 0,
- .start_time = 0,
- .value = NAN,
- .anomaly = 0,
- .flags = SN_FLAG_NONE,
+ .sp = STORAGE_POINT_UNSET,
+ .value = NAN,
+ .added = false,
#ifdef NETDATA_INTERNAL_CHECKS
- .id = 0,
+ .id = 0,
#endif
};
@@ -934,21 +1166,27 @@ typedef struct query_engine_ops {
size_t tier;
struct query_metric_tier *tier_ptr;
struct storage_engine_query_handle *handle;
- STORAGE_POINT (*next_metric)(struct storage_engine_query_handle *handle);
- int (*is_finished)(struct storage_engine_query_handle *handle);
- void (*finalize)(struct storage_engine_query_handle *handle);
// aggregating points over time
- void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
- NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
size_t group_points_non_zero;
size_t group_points_added;
- NETDATA_DOUBLE group_anomaly_rate;
+ STORAGE_POINT group_point; // aggregates min, max, sum, count, anomaly count for each group point
+ STORAGE_POINT query_point; // aggregates min, max, sum, count, anomaly count across the whole query
RRDR_VALUE_FLAGS group_value_flags;
// statistics
size_t db_total_points_read;
size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
+
+ struct {
+ time_t expanded_after;
+ time_t expanded_before;
+ struct storage_engine_query_handle handle;
+ bool initialized;
+ bool finalized;
+ } plans[QUERY_PLANS_MAX];
+
+ struct query_engine_ops *next;
} QUERY_ENGINE_OPS;
@@ -1005,40 +1243,28 @@ static void query_planer_initialize_plans(QUERY_ENGINE_OPS *ops) {
time_t after = qm->plan.array[p].after - (time_t)(update_every * points_to_add_to_after);
time_t before = qm->plan.array[p].before + (time_t)(update_every * points_to_add_to_before);
- qm->plan.array[p].expanded_after = after;
- qm->plan.array[p].expanded_before = before;
+ ops->plans[p].expanded_after = after;
+ ops->plans[p].expanded_before = before;
+
+ ops->r->internal.qt->db.tiers[tier].queries++;
struct query_metric_tier *tier_ptr = &qm->tiers[tier];
- tier_ptr->eng->api.query_ops.init(
- tier_ptr->db_metric_handle,
- &qm->plan.array[p].handle,
- after, before,
- ops->r->internal.qt->request.priority);
-
- qm->plan.array[p].next_metric = tier_ptr->eng->api.query_ops.next_metric;
- qm->plan.array[p].is_finished = tier_ptr->eng->api.query_ops.is_finished;
- qm->plan.array[p].finalize = tier_ptr->eng->api.query_ops.finalize;
- qm->plan.array[p].initialized = true;
- qm->plan.array[p].finalized = false;
+ STORAGE_ENGINE *eng = query_metric_storage_engine(ops->r->internal.qt, qm, tier);
+ storage_engine_query_init(eng->backend, tier_ptr->db_metric_handle, &ops->plans[p].handle,
+ after, before, ops->r->internal.qt->request.priority);
+
+ ops->plans[p].initialized = true;
+ ops->plans[p].finalized = false;
}
}
static void query_planer_finalize_plan(QUERY_ENGINE_OPS *ops, size_t plan_id) {
- QUERY_METRIC *qm = ops->qm;
-
- if(qm->plan.array[plan_id].initialized && !qm->plan.array[plan_id].finalized) {
- qm->plan.array[plan_id].finalize(&qm->plan.array[plan_id].handle);
- qm->plan.array[plan_id].initialized = false;
- qm->plan.array[plan_id].finalized = true;
- qm->plan.array[plan_id].next_metric = NULL;
- qm->plan.array[plan_id].is_finished = NULL;
- qm->plan.array[plan_id].finalize = NULL;
+ // QUERY_METRIC *qm = ops->qm;
- if(ops->current_plan == plan_id) {
- ops->next_metric = NULL;
- ops->is_finished = NULL;
- ops->finalize = NULL;
- }
+ if(ops->plans[plan_id].initialized && !ops->plans[plan_id].finalized) {
+ storage_engine_query_finalize(&ops->plans[plan_id].handle);
+ ops->plans[plan_id].initialized = false;
+ ops->plans[plan_id].finalized = true;
}
}
@@ -1053,17 +1279,14 @@ static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, ti
QUERY_METRIC *qm = ops->qm;
internal_fatal(plan_id >= qm->plan.used, "QUERY: invalid plan_id given");
- internal_fatal(!qm->plan.array[plan_id].initialized, "QUERY: plan has not been initialized");
- internal_fatal(qm->plan.array[plan_id].finalized, "QUERY: plan has been finalized");
+ internal_fatal(!ops->plans[plan_id].initialized, "QUERY: plan has not been initialized");
+ internal_fatal(ops->plans[plan_id].finalized, "QUERY: plan has been finalized");
internal_fatal(qm->plan.array[plan_id].after > qm->plan.array[plan_id].before, "QUERY: flipped after/before");
ops->tier = qm->plan.array[plan_id].tier;
ops->tier_ptr = &qm->tiers[ops->tier];
- ops->handle = &qm->plan.array[plan_id].handle;
- ops->next_metric = qm->plan.array[plan_id].next_metric;
- ops->is_finished = qm->plan.array[plan_id].is_finished;
- ops->finalize = qm->plan.array[plan_id].finalize;
+ ops->handle = &ops->plans[plan_id].handle;
ops->current_plan = plan_id;
if(plan_id + 1 < qm->plan.used && qm->plan.array[plan_id + 1].after < qm->plan.array[plan_id].before)
@@ -1071,8 +1294,8 @@ static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, ti
else
ops->current_plan_expire_time = qm->plan.array[plan_id].before;
- ops->plan_expanded_after = qm->plan.array[plan_id].expanded_after;
- ops->plan_expanded_before = qm->plan.array[plan_id].expanded_before;
+ ops->plan_expanded_after = ops->plans[plan_id].expanded_after;
+ ops->plan_expanded_before = ops->plans[plan_id].expanded_before;
}
static bool query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
@@ -1117,18 +1340,17 @@ static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
// put our selected tier as the first plan
size_t selected_tier;
+ bool switch_tiers = true;
- if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER
+ if((ops->r->internal.qt->window.options & RRDR_OPTION_SELECTED_TIER)
&& ops->r->internal.qt->window.tier < storage_tiers
&& query_metric_is_valid_tier(qm, ops->r->internal.qt->window.tier)) {
selected_tier = ops->r->internal.qt->window.tier;
+ switch_tiers = false;
}
else {
selected_tier = query_metric_best_tier_for_timeframe(qm, after_wanted, before_wanted, points_wanted);
- if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
- ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
-
if(!query_metric_is_valid_tier(qm, selected_tier))
return false;
@@ -1142,7 +1364,7 @@ static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
qm->plan.array[0].after = (qm->tiers[selected_tier].db_first_time_s < after_wanted) ? after_wanted : qm->tiers[selected_tier].db_first_time_s;
qm->plan.array[0].before = (qm->tiers[selected_tier].db_last_time_s > before_wanted) ? before_wanted : qm->tiers[selected_tier].db_last_time_s;
- if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
+ if(switch_tiers) {
// the selected tier
time_t selected_tier_first_time_s = qm->plan.array[0].after;
time_t selected_tier_last_time_s = qm->plan.array[0].before;
@@ -1150,7 +1372,7 @@ static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
// check if our selected tier can start the query
if (selected_tier_first_time_s > after_wanted) {
// we need some help from other tiers
- for (size_t tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
+ for (size_t tr = (int)selected_tier + 1; tr < storage_tiers && qm->plan.used < QUERY_PLANS_MAX ; tr++) {
if(!query_metric_is_valid_tier(qm, tr))
continue;
@@ -1164,9 +1386,9 @@ static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
.tier = tr,
.after = (tier_first_time_s < after_wanted) ? after_wanted : tier_first_time_s,
.before = selected_tier_first_time_s,
- .initialized = false,
- .finalized = false,
};
+ ops->plans[qm->plan.used].initialized = false;
+ ops->plans[qm->plan.used].finalized = false;
qm->plan.array[qm->plan.used++] = t;
internal_fatal(!t.after || !t.before, "QUERY: invalid plan selected");
@@ -1183,7 +1405,7 @@ static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
// check if our selected tier can finish the query
if (selected_tier_last_time_s < before_wanted) {
// we need some help from other tiers
- for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
+ for (int tr = (int)selected_tier - 1; tr >= 0 && qm->plan.used < QUERY_PLANS_MAX ; tr--) {
if(!query_metric_is_valid_tier(qm, tr))
continue;
@@ -1199,9 +1421,9 @@ static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
.tier = tr,
.after = selected_tier_last_time_s,
.before = (tier_last_time_s > before_wanted) ? before_wanted : tier_last_time_s,
- .initialized = false,
- .finalized = false,
};
+ ops->plans[qm->plan.used].initialized = false;
+ ops->plans[qm->plan.used].finalized = false;
qm->plan.array[qm->plan.used++] = t;
// prepare for the tier
@@ -1244,60 +1466,102 @@ static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
#define query_interpolate_point(this_point, last_point, now) do { \
if(likely( \
/* the point to interpolate is more than 1s wide */ \
- (this_point).end_time - (this_point).start_time > 1 \
+ (this_point).sp.end_time_s - (this_point).sp.start_time_s > 1 \
\
/* the two points are exactly next to each other */ \
- && (last_point).end_time == (this_point).start_time \
+ && (last_point).sp.end_time_s == (this_point).sp.start_time_s \
\
/* both points are valid numbers */ \
&& netdata_double_isnumber((this_point).value) \
&& netdata_double_isnumber((last_point).value) \
\
)) { \
- (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).end_time - (now)) / (NETDATA_DOUBLE)((this_point).end_time - (this_point).start_time)); \
- (this_point).end_time = now; \
+ (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).sp.end_time_s - (now)) / (NETDATA_DOUBLE)((this_point).sp.end_time_s - (this_point).sp.start_time_s)); \
+ (this_point).sp.end_time_s = now; \
} \
} while(0)
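+
+// A worked example of the interpolation above, with illustrative numbers
+// (not from the source): last_point.value = 10 ending at t = 100,
+// this_point.value = 20 spanning t = 100..110, and now = 105:
+//
+//   value = 10 + (20 - 10) * (1.0 - (110 - 105) / (110 - 100)) = 15
+//
+// and this_point's end time becomes 105, i.e. the db point is linearly
+// cut at "now" so it fills the current view slot exactly.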
-#define query_add_point_to_group(r, point, ops) do { \
+#define query_add_point_to_group(r, point, ops, add_flush) do { \
if(likely(netdata_double_isnumber((point).value))) { \
if(likely(fpclassify((point).value) != FP_ZERO)) \
(ops)->group_points_non_zero++; \
\
- if(unlikely((point).flags & SN_FLAG_RESET)) \
+ if(unlikely((point).sp.flags & SN_FLAG_RESET)) \
(ops)->group_value_flags |= RRDR_VALUE_RESET; \
\
- (ops)->grouping_add(r, (point).value); \
+ time_grouping_add(r, (point).value, add_flush); \
+ \
+ storage_point_merge_to((ops)->group_point, (point).sp); \
+ if(!(point).added) \
+ storage_point_merge_to((ops)->query_point, (point).sp); \
} \
\
(ops)->group_points_added++; \
- (ops)->group_anomaly_rate += (point).anomaly; \
} while(0)
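+
+// As this hunk suggests, the same QUERY_POINT may be fed to the grouping
+// once per view slot it overlaps, so the (point).added flag guards the
+// per-query statistics: the underlying storage point is merged into
+// (ops)->query_point only the first time it is seen, while
+// (ops)->group_point is rebuilt from scratch for every output slot.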
-static QUERY_ENGINE_OPS *rrd2rrdr_query_prep(RRDR *r, size_t dim_id_in_rrdr) {
+static __thread QUERY_ENGINE_OPS *released_ops = NULL;
+
+static void rrd2rrdr_query_ops_freeall(RRDR *r __maybe_unused) {
+ while(released_ops) {
+ QUERY_ENGINE_OPS *ops = released_ops;
+ released_ops = ops->next;
+
+ onewayalloc_freez(r->internal.owa, ops);
+ }
+}
+
+static void rrd2rrdr_query_ops_release(QUERY_ENGINE_OPS *ops) {
+ if(!ops) return;
+
+ ops->next = released_ops;
+ released_ops = ops;
+}
+
+static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_get(RRDR *r) {
+ QUERY_ENGINE_OPS *ops;
+ if(released_ops) {
+ ops = released_ops;
+ released_ops = ops->next;
+ }
+ else {
+ ops = onewayalloc_mallocz(r->internal.owa, sizeof(QUERY_ENGINE_OPS));
+ }
+
+ memset(ops, 0, sizeof(*ops));
+ return ops;
+}
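+
+// A sketch of the intended life-cycle, based on the three helpers above:
+// the per-thread released_ops list recycles QUERY_ENGINE_OPS between the
+// metrics of a query, avoiding one onewayalloc allocation per metric:
+//
+//   QUERY_ENGINE_OPS *ops = rrd2rrdr_query_ops_get(r); // reuse or allocate
+//   // ... execute the query for one metric ...
+//   rrd2rrdr_query_ops_release(ops);                   // push back for reuse
+//   rrd2rrdr_query_ops_freeall(r);                     // bulk free at the end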
+
+static QUERY_ENGINE_OPS *rrd2rrdr_query_ops_prep(RRDR *r, size_t query_metric_id) {
QUERY_TARGET *qt = r->internal.qt;
- QUERY_ENGINE_OPS *ops = onewayalloc_mallocz(r->internal.owa, sizeof(QUERY_ENGINE_OPS));
+ QUERY_ENGINE_OPS *ops = rrd2rrdr_query_ops_get(r);
*ops = (QUERY_ENGINE_OPS) {
.r = r,
- .qm = &qt->query.array[dim_id_in_rrdr],
- .grouping_add = r->internal.grouping_add,
- .grouping_flush = r->internal.grouping_flush,
- .tier_query_fetch = r->internal.tier_query_fetch,
- .view_update_every = r->update_every,
- .query_granularity = (time_t)(r->update_every / r->group),
+ .qm = query_metric(qt, query_metric_id),
+ .tier_query_fetch = r->time_grouping.tier_query_fetch,
+ .view_update_every = r->view.update_every,
+ .query_granularity = (time_t)(r->view.update_every / r->view.group),
.group_value_flags = RRDR_VALUE_NOTHING,
};
- if(!query_plan(ops, qt->window.after, qt->window.before, qt->window.points))
+ if(!query_plan(ops, qt->window.after, qt->window.before, qt->window.points)) {
+ rrd2rrdr_query_ops_release(ops);
return NULL;
+ }
return ops;
}
static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_OPS *ops) {
QUERY_TARGET *qt = r->internal.qt;
- QUERY_METRIC *qm = &qt->query.array[dim_id_in_rrdr]; (void)qm;
+ QUERY_METRIC *qm = ops->qm;
+
+ const RRDR_TIME_GROUPING add_flush = r->time_grouping.add_flush;
+
+ ops->group_point = STORAGE_POINT_UNSET;
+ ops->query_point = STORAGE_POINT_UNSET;
+
+ RRDR_OPTIONS options = qt->window.options;
size_t points_wanted = qt->window.points;
time_t after_wanted = qt->window.after;
time_t before_wanted = qt->window.before; (void)before_wanted;
@@ -1306,15 +1570,12 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
// if(strcmp("user", string2str(rd->id)) == 0 && strcmp("system.cpu", string2str(rd->rrdset->id)) == 0)
// debug_this = true;
- time_t max_date = 0,
- min_date = 0;
-
size_t points_added = 0;
long rrdr_line = -1;
- bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
+ bool use_anomaly_bit_as_value = (r->internal.qt->window.options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
- NETDATA_DOUBLE min = r->min, max = r->max;
+ NETDATA_DOUBLE min = r->view.min, max = r->view.max;
QUERY_POINT last2_point = QUERY_POINT_EMPTY;
QUERY_POINT last1_point = QUERY_POINT_EMPTY;
@@ -1329,12 +1590,14 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
time_t now_end_time = after_wanted + ops->view_update_every - ops->query_granularity;
size_t db_points_read_since_plan_switch = 0; (void)db_points_read_since_plan_switch;
+ size_t query_is_finished_counter = 0;
// The main loop, based on the query granularity we need
- for( ; points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops->view_update_every) {
+ for( ; points_added < points_wanted && query_is_finished_counter <= 10 ;
+ now_start_time = now_end_time, now_end_time += ops->view_update_every) {
if(unlikely(query_plan_should_switch_plan(ops, now_end_time))) {
- query_planer_next_plan(ops, now_end_time, new_point.end_time);
+ query_planer_next_plan(ops, now_end_time, new_point.sp.end_time_s);
db_points_read_since_plan_switch = 0;
}
@@ -1347,26 +1610,35 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
last1_point = new_point;
}
- if(unlikely(ops->is_finished(ops->handle))) {
+ if(unlikely(storage_engine_query_is_finished(ops->handle))) {
+ query_is_finished_counter++;
+
if(count_same_end_time != 0) {
last2_point = last1_point;
last1_point = new_point;
}
new_point = QUERY_POINT_EMPTY;
- new_point.start_time = last1_point.end_time;
- new_point.end_time = now_end_time;
+ new_point.sp.start_time_s = last1_point.sp.end_time_s;
+ new_point.sp.end_time_s = now_end_time;
//
// if(debug_this) info("QUERY: is finished() returned true");
//
break;
}
+ else
+ query_is_finished_counter = 0;
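+
+ // the counter above appears to act as a safety valve: if the storage
+ // engine keeps reporting "finished" while the view window still has
+ // slots to fill, the main loop gives up after ~10 consecutive such
+ // iterations (see the loop condition) instead of spinning until
+ // points_wanted, and the remaining slots are filled with empty values.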
// fetch the new point
{
STORAGE_POINT sp;
if(likely(storage_point_is_unset(next1_point))) {
db_points_read_since_plan_switch++;
- sp = ops->next_metric(ops->handle);
+ sp = storage_engine_query_next_metric(ops->handle);
+ ops->db_points_read_per_tier[ops->tier]++;
+ ops->db_total_points_read++;
+
+ if(unlikely(options & RRDR_OPTION_ABSOLUTE))
+ storage_point_make_positive(sp);
}
else {
// ONE POINT READ-AHEAD
@@ -1377,7 +1649,7 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
// ONE POINT READ-AHEAD
if(unlikely(query_plan_should_switch_plan(ops, sp.end_time_s) &&
- query_planer_next_plan(ops, now_end_time, new_point.end_time))) {
+ query_planer_next_plan(ops, now_end_time, new_point.sp.end_time_s))) {
// The end time of the current point, crosses our plans (tiers)
// so, we switched plan (tier)
@@ -1387,7 +1659,12 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
// A. the entire point of the previous plan is to the future of point from the next plan
// B. part of the point of the previous plan overlaps with the point from the next plan
- STORAGE_POINT sp2 = ops->next_metric(ops->handle);
+ STORAGE_POINT sp2 = storage_engine_query_next_metric(ops->handle);
+ ops->db_points_read_per_tier[ops->tier]++;
+ ops->db_total_points_read++;
+
+ if(unlikely(options & RRDR_OPTION_ABSOLUTE))
+ storage_point_make_positive(sp2);
if(sp.start_time_s > sp2.start_time_s)
// the point from the previous plan is useless
@@ -1399,12 +1676,8 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
next1_point = sp2;
}
- ops->db_points_read_per_tier[ops->tier]++;
- ops->db_total_points_read++;
-
- new_point.start_time = sp.start_time_s;
- new_point.end_time = sp.end_time_s;
- new_point.anomaly = sp.count ? (NETDATA_DOUBLE)sp.anomaly_count * 100.0 / (NETDATA_DOUBLE)sp.count : 0.0;
+ new_point.sp = sp;
+ new_point.added = false;
query_point_set_id(new_point, ops->db_total_points_read);
// if(debug_this)
@@ -1415,13 +1688,13 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
if(likely(!storage_point_is_unset(sp) && !storage_point_is_gap(sp))) {
if(unlikely(use_anomaly_bit_as_value))
- new_point.value = new_point.anomaly;
+ new_point.value = storage_point_anomaly_rate(new_point.sp);
else {
switch (ops->tier_query_fetch) {
default:
case TIER_QUERY_FETCH_AVERAGE:
- new_point.value = sp.sum / sp.count;
+ new_point.value = sp.sum / (NETDATA_DOUBLE)sp.count;
break;
case TIER_QUERY_FETCH_MIN:
@@ -1438,36 +1711,34 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
};
}
}
- else {
+ else
new_point.value = NAN;
- new_point.flags = SN_FLAG_NONE;
- }
}
// check if the db is giving us zero duration points
if(unlikely(db_points_read_since_plan_switch > 1 &&
- new_point.start_time == new_point.end_time)) {
+ new_point.sp.start_time_s == new_point.sp.end_time_s)) {
internal_error(true, "QUERY: '%s', dimension '%s' next_metric() returned "
"point %zu from %ld to %ld, that are both equal",
- qt->id, string2str(qm->dimension.id),
- new_point.id, new_point.start_time, new_point.end_time);
+ qt->id, query_metric_id(qt, qm),
+ new_point.id, new_point.sp.start_time_s, new_point.sp.end_time_s);
- new_point.start_time = new_point.end_time - ops->tier_ptr->db_update_every_s;
+ new_point.sp.start_time_s = new_point.sp.end_time_s - ops->tier_ptr->db_update_every_s;
}
// check if the db is advancing the query
if(unlikely(db_points_read_since_plan_switch > 1 &&
- new_point.end_time <= last1_point.end_time)) {
+ new_point.sp.end_time_s <= last1_point.sp.end_time_s)) {
internal_error(true,
"QUERY: '%s', dimension '%s' next_metric() returned "
"point %zu from %ld to %ld, before the "
"last point %zu from %ld to %ld, "
"now is %ld to %ld",
- qt->id, string2str(qm->dimension.id),
- new_point.id, new_point.start_time, new_point.end_time,
- last1_point.id, last1_point.start_time, last1_point.end_time,
+ qt->id, query_metric_id(qt, qm),
+ new_point.id, new_point.sp.start_time_s, new_point.sp.end_time_s,
+ last1_point.id, last1_point.sp.start_time_s, last1_point.sp.end_time_s,
now_start_time, now_end_time);
count_same_end_time++;
@@ -1476,13 +1747,14 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
count_same_end_time = 0;
// decide how to use this point
- if(likely(new_point.end_time < now_end_time)) { // likely to favor tier0
+ if(likely(new_point.sp.end_time_s < now_end_time)) { // likely to favor tier0
// this db point ends before our now_end_time
- if(likely(new_point.end_time >= now_start_time)) { // likely to favor tier0
+ if(likely(new_point.sp.end_time_s >= now_start_time)) { // likely to favor tier0
// this db point ends after our now_start time
- query_add_point_to_group(r, new_point, ops);
+ query_add_point_to_group(r, new_point, ops, add_flush);
+ new_point.added = true;
}
else {
// we don't need this db point
@@ -1493,14 +1765,14 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
// at exactly the time we will want
// we only log if this is not point 1
- internal_error(new_point.end_time < ops->plan_expanded_after &&
+ internal_error(new_point.sp.end_time_s < ops->plan_expanded_after &&
db_points_read_since_plan_switch > 1,
"QUERY: '%s', dimension '%s' next_metric() "
"returned point %zu from %ld time %ld, "
"which is entirely before our current timeframe %ld to %ld "
"(and before the entire query, after %ld, before %ld)",
- qt->id, string2str(qm->dimension.id),
- new_point.id, new_point.start_time, new_point.end_time,
+ qt->id, query_metric_id(qt, qm),
+ new_point.id, new_point.sp.start_time_s, new_point.sp.end_time_s,
now_start_time, now_end_time,
ops->plan_expanded_after, ops->plan_expanded_before);
}
@@ -1518,15 +1790,15 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
"QUERY: '%s', dimension '%s', the database does not advance the query,"
" it returned an end time less or equal to the end time of the last "
"point we got %ld, %zu times",
- qt->id, string2str(qm->dimension.id),
- last1_point.end_time, count_same_end_time);
+ qt->id, query_metric_id(qt, qm),
+ last1_point.sp.end_time_s, count_same_end_time);
- if(unlikely(new_point.end_time <= last1_point.end_time))
- new_point.end_time = now_end_time;
+ if(unlikely(new_point.sp.end_time_s <= last1_point.sp.end_time_s))
+ new_point.sp.end_time_s = now_end_time;
}
- time_t stop_time = new_point.end_time;
- if(unlikely(!storage_point_is_unset(next1_point))) {
+ time_t stop_time = new_point.sp.end_time_s;
+ if(unlikely(!storage_point_is_unset(next1_point) && next1_point.start_time_s >= now_end_time)) {
// ONE POINT READ-AHEAD
// the point crosses the start time of the
// read ahead storage point we have read
@@ -1537,18 +1809,20 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
// we have 3 points in memory: last2, last1, new
// we select the one to use based on their timestamps
- size_t iterations = 0;
- for ( ; now_end_time <= stop_time && points_added < points_wanted ;
- now_end_time += ops->view_update_every, iterations++) {
+ internal_fatal(now_end_time > stop_time || points_added >= points_wanted,
+ "QUERY: first part of query provides invalid point to interpolate (now_end_time %ld, stop_time %ld",
+ now_end_time, stop_time);
+ do {
// now_start_time is wrong in this loop
// but, we don't need it
QUERY_POINT current_point;
- if(likely(now_end_time > new_point.start_time)) {
+ if(likely(now_end_time > new_point.sp.start_time_s)) {
// it is time for our NEW point to be used
current_point = new_point;
+ new_point.added = true; // first copy, then set it, so that new_point will not be added again
query_interpolate_point(current_point, last1_point, now_end_time);
// internal_error(current_point.id > 0
@@ -1564,9 +1838,10 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
// current_point.id, current_point.start_time, current_point.end_time,
// now_end_time);
}
- else if(likely(now_end_time <= last1_point.end_time)) {
+ else if(likely(now_end_time <= last1_point.sp.end_time_s)) {
// our LAST point is still valid
current_point = last1_point;
+ last1_point.added = true; // first copy, then set it, so that last1_point will not be added again
query_interpolate_point(current_point, last2_point, now_end_time);
// internal_error(current_point.id > 0
@@ -1586,14 +1861,11 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
current_point = QUERY_POINT_EMPTY;
}
- query_add_point_to_group(r, current_point, ops);
+ query_add_point_to_group(r, current_point, ops, add_flush);
rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
- if(unlikely(!min_date)) min_date = now_end_time;
- max_date = now_end_time;
-
// find the place to store our values
RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
@@ -1605,15 +1877,12 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
*rrdr_value_options_ptr = ops->group_value_flags;
// store the group value
- NETDATA_DOUBLE group_value = ops->grouping_flush(r, rrdr_value_options_ptr);
+ NETDATA_DOUBLE group_value = time_grouping_flush(r, rrdr_value_options_ptr, add_flush);
r->v[rrdr_o_v_index] = group_value;
- // we only store uint8_t anomaly rates,
- // so let's get double precision by storing
- // anomaly rates in the range 0 - 200
- r->ar[rrdr_o_v_index] = ops->group_anomaly_rate / (NETDATA_DOUBLE)ops->group_points_added;
+ r->ar[rrdr_o_v_index] = storage_point_anomaly_rate(ops->group_point);
- if(likely(points_added || dim_id_in_rrdr)) {
+ if(likely(points_added || r->internal.queries_count)) {
// find the min/max across all dimensions
if(unlikely(group_value < min)) min = group_value;
@@ -1621,7 +1890,7 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
}
else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // runs only when r->internal.queries_count == 0 && points_added == 0
// so, on the first point added for the query.
min = max = group_value;
}
@@ -1630,31 +1899,38 @@ static void rrd2rrdr_query_execute(RRDR *r, size_t dim_id_in_rrdr, QUERY_ENGINE_
ops->group_points_added = 0;
ops->group_value_flags = RRDR_VALUE_NOTHING;
ops->group_points_non_zero = 0;
- ops->group_anomaly_rate = 0;
- }
- // the loop above increased "now" by query_granularity,
+ ops->group_point = STORAGE_POINT_UNSET;
+
+ now_end_time += ops->view_update_every;
+ } while(now_end_time <= stop_time && points_added < points_wanted);
+
+ // the loop above increased "now" by ops->view_update_every,
// but the main loop will increase it too,
// so, let's undo the last iteration of this loop
- if(iterations)
- now_end_time -= ops->view_update_every;
+ now_end_time -= ops->view_update_every;
}
query_planer_finalize_remaining_plans(ops);
- r->internal.result_points_generated += points_added;
- r->internal.db_points_read += ops->db_total_points_read;
+ qm->query_points = ops->query_point;
+
+ // fill the rest of the points with empty values
+ while (points_added < points_wanted) {
+ rrdr_line++;
+ size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
+ r->o[rrdr_o_v_index] = RRDR_VALUE_EMPTY;
+ r->v[rrdr_o_v_index] = 0.0;
+ r->ar[rrdr_o_v_index] = 0.0;
+ points_added++;
+ }
+
+ r->internal.queries_count++;
+ r->view.min = min;
+ r->view.max = max;
+
+ r->stats.result_points_generated += points_added;
+ r->stats.db_points_read += ops->db_total_points_read;
for(size_t tr = 0; tr < storage_tiers ; tr++)
- r->internal.tier_points_read[tr] += ops->db_points_read_per_tier[tr];
-
- r->min = min;
- r->max = max;
- r->before = max_date;
- r->after = min_date - ops->view_update_every + ops->query_granularity;
- rrdr_done(r, rrdr_line);
-
- internal_error(points_added != points_wanted,
- "QUERY: '%s', dimension '%s', requested %zu points, but RRDR added %zu (%zu db points read).",
- qt->id, string2str(qm->dimension.id),
- (size_t)points_wanted, (size_t)points_added, ops->db_total_points_read);
+ qt->db.tiers[tr].points += ops->db_points_read_per_tier[tr];
}
// ----------------------------------------------------------------------------
@@ -1669,7 +1945,7 @@ void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now_s
struct rrddim_tier *t = &rd->tiers[tier];
if(unlikely(!t)) return;
- time_t latest_time_s = t->query_ops->latest_time_s(t->db_metric_handle);
+ time_t latest_time_s = storage_engine_latest_time_s(t->backend, t->db_metric_handle);
time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
time_t time_diff = now_s - latest_time_s;
@@ -1683,21 +1959,21 @@ void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now_s
// for each lower tier
for(int read_tier = (int)tier - 1; read_tier >= 0 ; read_tier--){
- time_t smaller_tier_first_time = rd->tiers[read_tier].query_ops->oldest_time_s(rd->tiers[read_tier].db_metric_handle);
- time_t smaller_tier_last_time = rd->tiers[read_tier].query_ops->latest_time_s(rd->tiers[read_tier].db_metric_handle);
+ time_t smaller_tier_first_time = storage_engine_oldest_time_s(rd->tiers[read_tier].backend, rd->tiers[read_tier].db_metric_handle);
+ time_t smaller_tier_last_time = storage_engine_latest_time_s(rd->tiers[read_tier].backend, rd->tiers[read_tier].db_metric_handle);
if(smaller_tier_last_time <= latest_time_s) continue; // it is as bad as we are
long after_wanted = (latest_time_s < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_s;
long before_wanted = smaller_tier_last_time;
struct rrddim_tier *tmp = &rd->tiers[read_tier];
- tmp->query_ops->init(tmp->db_metric_handle, &handle, after_wanted, before_wanted, STORAGE_PRIORITY_HIGH);
+ storage_engine_query_init(tmp->backend, tmp->db_metric_handle, &handle, after_wanted, before_wanted, STORAGE_PRIORITY_HIGH);
size_t points_read = 0;
- while(!tmp->query_ops->is_finished(&handle)) {
+ while(!storage_engine_query_is_finished(&handle)) {
- STORAGE_POINT sp = tmp->query_ops->next_metric(&handle);
+ STORAGE_POINT sp = storage_engine_query_next_metric(&handle);
points_read++;
if(sp.end_time_s > latest_time_s) {
@@ -1706,7 +1982,7 @@ void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now_s
}
}
- tmp->query_ops->finalize(&handle);
+ storage_engine_query_finalize(&handle);
store_metric_collection_completed();
global_statistics_backfill_query_completed(points_read);
@@ -1721,7 +1997,7 @@ void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now_s
#ifdef NETDATA_INTERNAL_CHECKS
static void rrd2rrdr_log_request_response_metadata(RRDR *r
, RRDR_OPTIONS options __maybe_unused
- , RRDR_GROUPING group_method
+ , RRDR_TIME_GROUPING group_method
, bool aligned
, size_t group
, time_t resampling_time
@@ -1737,8 +2013,9 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
, const char *msg
) {
- time_t first_entry_s = r->internal.qt->db.first_time_s;
- time_t last_entry_s = r->internal.qt->db.last_time_s;
+ QUERY_TARGET *qt = r->internal.qt;
+ time_t first_entry_s = qt->db.first_time_s;
+ time_t last_entry_s = qt->db.last_time_s;
internal_error(
true,
@@ -1748,33 +2025,33 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
"duration (got: %ld, want: %ld, req: %ld, db: %ld), "
"points (got: %zu, want: %zu, req: %zu), "
"%s"
- , r->internal.qt->id
- , r->internal.qt->window.query_granularity
+ , qt->id
+ , qt->window.query_granularity
// grouping
, (aligned) ? "aligned" : "unaligned"
- , group_method2string(group_method)
+ , time_grouping_method2string(group_method)
, group
, resampling_time
, resampling_group
// after
- , r->after
+ , r->view.after
, after_wanted
, after_requested
, first_entry_s
// before
- , r->before
+ , r->view.before
, before_wanted
, before_requested
, last_entry_s
// duration
- , (long)(r->before - r->after + r->internal.qt->window.query_granularity)
- , (long)(before_wanted - after_wanted + r->internal.qt->window.query_granularity)
+ , (long)(r->view.before - r->view.after + qt->window.query_granularity)
+ , (long)(before_wanted - after_wanted + qt->window.query_granularity)
, (long)before_requested - after_requested
- , (long)((last_entry_s - first_entry_s) + r->internal.qt->window.query_granularity)
+ , (long)((last_entry_s - first_entry_s) + qt->window.query_granularity)
// points
, r->rows
@@ -1788,9 +2065,12 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
#endif // NETDATA_INTERNAL_CHECKS
// Returns true if an absolute period was requested, or false if it was a relative period
-bool rrdr_relative_window_to_absolute(time_t *after, time_t *before) {
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t *now_ptr) {
time_t now = now_realtime_sec() - 1;
+ if(now_ptr)
+ *now_ptr = now;
+
int absolute_period_requested = -1;
long long after_requested, before_requested;
@@ -1890,11 +2170,11 @@ bool query_target_calculate_window(QUERY_TARGET *qt) {
size_t points_requested = (long)qt->request.points;
time_t after_requested = qt->request.after;
time_t before_requested = qt->request.before;
- RRDR_GROUPING group_method = qt->request.group_method;
+ RRDR_TIME_GROUPING group_method = qt->request.time_group_method;
time_t resampling_time_requested = qt->request.resampling_time;
- RRDR_OPTIONS options = qt->request.options;
+ RRDR_OPTIONS options = qt->window.options;
size_t tier = qt->request.tier;
- time_t update_every = qt->db.minimum_latest_update_every_s;
+ time_t update_every = qt->db.minimum_latest_update_every_s ? qt->db.minimum_latest_update_every_s : 1;
// RULES
// points_requested = 0
@@ -1953,27 +2233,36 @@ bool query_target_calculate_window(QUERY_TARGET *qt) {
time_t last_entry_s = qt->db.last_time_s;
if (first_entry_s == 0 || last_entry_s == 0) {
- internal_error(true, "QUERY: no data detected on query '%s' (db first_entry_t = %ld, last_entry_t = %ld", qt->id, first_entry_s, last_entry_s);
- query_debug_log_free();
- return false;
- }
+ internal_error(true, "QUERY: no data detected on query '%s' (db first_entry_t = %ld, last_entry_t = %ld)", qt->id, first_entry_s, last_entry_s);
+ after_wanted = qt->window.after;
+ before_wanted = qt->window.before;
- query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_s, last_entry_s);
+ if(after_wanted == before_wanted)
+ after_wanted = before_wanted - update_every;
- if (after_wanted == 0) {
- after_wanted = first_entry_s;
- query_debug_log(":zero after_wanted %ld", after_wanted);
+ if (points_wanted == 0) {
+ points_wanted = (before_wanted - after_wanted) / update_every;
+ query_debug_log(":zero points_wanted %zu", points_wanted);
+ }
}
+ else {
+ query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_s, last_entry_s);
- if (before_wanted == 0) {
- before_wanted = last_entry_s;
- before_is_aligned_to_db_end = true;
- query_debug_log(":zero before_wanted %ld", before_wanted);
- }
+ if (after_wanted == 0) {
+ after_wanted = first_entry_s;
+ query_debug_log(":zero after_wanted %ld", after_wanted);
+ }
- if (points_wanted == 0) {
- points_wanted = (last_entry_s - first_entry_s) / update_every;
- query_debug_log(":zero points_wanted %zu", points_wanted);
+ if (before_wanted == 0) {
+ before_wanted = last_entry_s;
+ before_is_aligned_to_db_end = true;
+ query_debug_log(":zero before_wanted %ld", before_wanted);
+ }
+
+ if (points_wanted == 0) {
+ points_wanted = (last_entry_s - first_entry_s) / update_every;
+ query_debug_log(":zero points_wanted %zu", points_wanted);
+ }
}
}
@@ -1983,7 +2272,7 @@ bool query_target_calculate_window(QUERY_TARGET *qt) {
}
// convert our before_wanted and after_wanted to absolute
- rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
+ rrdr_relative_window_to_absolute(&after_wanted, &before_wanted, NULL);
query_debug_log(":relative2absolute after %ld, before %ld", after_wanted, before_wanted);
if (natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
@@ -2145,8 +2434,8 @@ bool query_target_calculate_window(QUERY_TARGET *qt) {
qt->window.relative = relative_period_requested;
qt->window.points = points_wanted;
qt->window.group = group;
- qt->window.group_method = group_method;
- qt->window.group_options = qt->request.group_options;
+ qt->window.time_group_method = group_method;
+ qt->window.time_group_options = qt->request.time_group_options;
qt->window.query_granularity = query_granularity;
qt->window.resampling_group = resampling_group;
qt->window.resampling_divisor = resampling_divisor;
@@ -2157,80 +2446,1081 @@ bool query_target_calculate_window(QUERY_TARGET *qt) {
return true;
}
+// ----------------------------------------------------------------------------
+// group by
+
+struct group_by_label_key {
+ DICTIONARY *values;
+};
+
+static void group_by_label_key_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ // add the key to our r->label_keys global keys dictionary
+ DICTIONARY *label_keys = data;
+ dictionary_set(label_keys, dictionary_acquired_item_name(item), NULL, 0);
+
+ // create a dictionary for the values of this key
+ struct group_by_label_key *k = value;
+ k->values = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, 0);
+}
+
+static void group_by_label_key_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct group_by_label_key *k = value;
+ dictionary_destroy(k->values);
+}
+
+static int rrdlabels_traversal_cb_to_group_by_label_key(const char *name, const char *value, RRDLABEL_SRC ls __maybe_unused, void *data) {
+ DICTIONARY *dl = data;
+ struct group_by_label_key *k = dictionary_set(dl, name, NULL, sizeof(struct group_by_label_key));
+ dictionary_set(k->values, value, NULL, 0);
+ return 1;
+}
+
+void rrdr_json_group_by_labels(BUFFER *wb, const char *key, RRDR *r, RRDR_OPTIONS options) {
+ if(!r->label_keys || !r->dl)
+ return;
+
+ buffer_json_member_add_object(wb, key);
+
+ void *t;
+ dfe_start_read(r->label_keys, t) {
+ buffer_json_member_add_array(wb, t_dfe.name);
+
+ for(size_t d = 0; d < r->d ;d++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[d], options))
+ continue;
+
+ struct group_by_label_key *k = dictionary_get(r->dl[d], t_dfe.name);
+ if(k) {
+ buffer_json_add_array_item_array(wb);
+ void *tt;
+ dfe_start_read(k->values, tt) {
+ buffer_json_add_array_item_string(wb, tt_dfe.name);
+ }
+ dfe_done(tt);
+ buffer_json_array_close(wb);
+ }
+ else
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+
+ buffer_json_array_close(wb);
+ }
+ dfe_done(t);
+
+ buffer_json_object_close(wb); // key
+}
+
+static int group_by_label_is_space(char c) {
+ if(c == ',' || c == '|')
+ return 1;
+
+ return 0;
+}
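+
+// Illustrative only: with ',' and '|' both acting as separators here, a
+// request such as group_by_label=team|env,location is split by
+// quoted_strings_splitter() into the label keys "team", "env" and
+// "location".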
+
+static void rrd2rrdr_set_timestamps(RRDR *r) {
+ QUERY_TARGET *qt = r->internal.qt;
+
+ internal_fatal(qt->window.points != r->n, "QUERY: mismatch to the number of points in qt and r");
+
+ r->view.group = qt->window.group;
+ r->view.update_every = (int) query_view_update_every(qt);
+ r->view.before = qt->window.before;
+ r->view.after = qt->window.after;
+
+ r->time_grouping.points_wanted = qt->window.points;
+ r->time_grouping.resampling_group = qt->window.resampling_group;
+ r->time_grouping.resampling_divisor = qt->window.resampling_divisor;
+
+ r->rows = qt->window.points;
+
+ size_t points_wanted = qt->window.points;
+ time_t after_wanted = qt->window.after;
+ time_t before_wanted = qt->window.before; (void)before_wanted;
+
+ time_t view_update_every = r->view.update_every;
+ time_t query_granularity = (time_t)(r->view.update_every / r->view.group);
+
+ size_t rrdr_line = 0;
+ time_t first_point_end_time = after_wanted + view_update_every - query_granularity;
+ time_t now_end_time = first_point_end_time;
+
+ while (rrdr_line < points_wanted) {
+ r->t[rrdr_line++] = now_end_time;
+ now_end_time += view_update_every;
+ }
+
+ internal_fatal(r->t[0] != first_point_end_time, "QUERY: wrong first timestamp in the query");
+ internal_error(r->t[points_wanted - 1] != before_wanted,
+ "QUERY: wrong last timestamp in the query, expected %ld, found %ld",
+ before_wanted, r->t[points_wanted - 1]);
+}
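+
+// A worked example with illustrative numbers: after = 100, points = 6,
+// view.update_every = 10 and view.group = 10 (so query_granularity = 1).
+// The first point ends at 100 + 10 - 1 = 109 and every next point 10s
+// later: t[] = { 109, 119, 129, 139, 149, 159 }, with the last entry
+// expected to match qt->window.before.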
+
+static void query_group_by_make_dimension_key(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_instance) {
+ buffer_flush(key);
+ if(unlikely(!query_has_percentage_of_instance && qm->status & RRDR_DIMENSION_HIDDEN)) {
+ buffer_strcat(key, "__hidden_dimensions__");
+ }
+ else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
+ buffer_strcat(key, "selected");
+ }
+ else {
+ if (group_by & RRDR_GROUP_BY_DIMENSION) {
+ buffer_fast_strcat(key, "|", 1);
+ buffer_strcat(key, query_metric_name(qt, qm));
+ }
+
+ if (group_by & (RRDR_GROUP_BY_INSTANCE|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)) {
+ buffer_fast_strcat(key, "|", 1);
+ buffer_strcat(key, string2str(query_instance_id_fqdn(qi, qt->request.version)));
+ }
+
+ if (group_by & RRDR_GROUP_BY_LABEL) {
+ DICTIONARY *labels = rrdinstance_acquired_labels(qi->ria);
+ for (size_t l = 0; l < qt->group_by[group_by_id].used; l++) {
+ buffer_fast_strcat(key, "|", 1);
+ rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by[group_by_id].label_keys[l], "[unset]");
+ }
+ }
+
+ if (group_by & RRDR_GROUP_BY_NODE) {
+ buffer_fast_strcat(key, "|", 1);
+ buffer_strcat(key, qn->rrdhost->machine_guid);
+ }
+
+ if (group_by & RRDR_GROUP_BY_CONTEXT) {
+ buffer_fast_strcat(key, "|", 1);
+ buffer_strcat(key, rrdcontext_acquired_id(qc->rca));
+ }
+
+ if (group_by & RRDR_GROUP_BY_UNITS) {
+ buffer_fast_strcat(key, "|", 1);
+ buffer_strcat(key, query_target_has_percentage_units(qt) ? "%" : rrdinstance_acquired_units(qi->ria));
+ }
+ }
+}
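+
+// Illustrative only: for group_by=dimension,instance the key built above
+// for a metric "user" of instance "system.cpu" would look something like
+// "|user|system.cpu@<node>" - each participating component is appended
+// with a leading "|", so metrics producing the same key are grouped
+// into the same output dimension.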
+
+static void query_group_by_make_dimension_id(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_instance) {
+ buffer_flush(key);
+ if(unlikely(!query_has_percentage_of_instance && qm->status & RRDR_DIMENSION_HIDDEN)) {
+ buffer_strcat(key, "__hidden_dimensions__");
+ }
+ else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
+ buffer_strcat(key, "selected");
+ }
+ else {
+ if (group_by & RRDR_GROUP_BY_DIMENSION) {
+ buffer_strcat(key, query_metric_name(qt, qm));
+ }
+
+ if (group_by & (RRDR_GROUP_BY_INSTANCE|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ if (group_by & RRDR_GROUP_BY_NODE)
+ buffer_strcat(key, rrdinstance_acquired_id(qi->ria));
+ else
+ buffer_strcat(key, string2str(query_instance_id_fqdn(qi, qt->request.version)));
+ }
+
+ if (group_by & RRDR_GROUP_BY_LABEL) {
+ DICTIONARY *labels = rrdinstance_acquired_labels(qi->ria);
+ for (size_t l = 0; l < qt->group_by[group_by_id].used; l++) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+ rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by[group_by_id].label_keys[l], "[unset]");
+ }
+ }
+
+ if (group_by & RRDR_GROUP_BY_NODE) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ buffer_strcat(key, qn->rrdhost->machine_guid);
+ }
+
+ if (group_by & RRDR_GROUP_BY_CONTEXT) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ buffer_strcat(key, rrdcontext_acquired_id(qc->rca));
+ }
+
+ if (group_by & RRDR_GROUP_BY_UNITS) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ buffer_strcat(key, query_target_has_percentage_units(qt) ? "%" : rrdinstance_acquired_units(qi->ria));
+ }
+ }
+}
+
+static void query_group_by_make_dimension_name(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_instance) {
+ buffer_flush(key);
+ if(unlikely(!query_has_percentage_of_instance && qm->status & RRDR_DIMENSION_HIDDEN)) {
+ buffer_strcat(key, "__hidden_dimensions__");
+ }
+ else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
+ buffer_strcat(key, "selected");
+ }
+ else {
+ if (group_by & RRDR_GROUP_BY_DIMENSION) {
+ buffer_strcat(key, query_metric_name(qt, qm));
+ }
+
+ if (group_by & (RRDR_GROUP_BY_INSTANCE|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ if (group_by & RRDR_GROUP_BY_NODE)
+ buffer_strcat(key, rrdinstance_acquired_name(qi->ria));
+ else
+ buffer_strcat(key, string2str(query_instance_name_fqdn(qi, qt->request.version)));
+ }
+
+ if (group_by & RRDR_GROUP_BY_LABEL) {
+ DICTIONARY *labels = rrdinstance_acquired_labels(qi->ria);
+ for (size_t l = 0; l < qt->group_by[group_by_id].used; l++) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+ rrdlabels_get_value_to_buffer_or_unset(labels, key, qt->group_by[group_by_id].label_keys[l], "[unset]");
+ }
+ }
+
+ if (group_by & RRDR_GROUP_BY_NODE) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ buffer_strcat(key, rrdhost_hostname(qn->rrdhost));
+ }
+
+ if (group_by & RRDR_GROUP_BY_CONTEXT) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ buffer_strcat(key, rrdcontext_acquired_id(qc->rca));
+ }
+
+ if (group_by & RRDR_GROUP_BY_UNITS) {
+ if (buffer_strlen(key) != 0)
+ buffer_fast_strcat(key, ",", 1);
+
+ buffer_strcat(key, query_target_has_percentage_units(qt) ? "%" : rrdinstance_acquired_units(qi->ria));
+ }
+ }
+}
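+
+// The three builders above share the same structure; they differ only in
+// the separator ("|" for the internal grouping key, "," for the exposed
+// id and name) and in whether the machine-readable id or the
+// human-readable name of each instance/node is emitted.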
+
+struct rrdr_group_by_entry {
+ size_t priority;
+ size_t count;
+ STRING *id;
+ STRING *name;
+ STRING *units;
+ RRDR_DIMENSION_FLAGS od;
+ DICTIONARY *dl;
+};
+
+static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
+ RRDR *r_tmp = NULL;
+ RRDR_OPTIONS options = qt->window.options;
+
+ if(qt->request.version < 2) {
+ // v1 query
+ RRDR *r = rrdr_create(owa, qt, qt->query.used, qt->window.points);
+ if(unlikely(!r)) {
+ internal_error(true, "QUERY: cannot create RRDR for %s, after=%ld, before=%ld, dimensions=%u, points=%zu",
+ qt->id, qt->window.after, qt->window.before, qt->query.used, qt->window.points);
+ return NULL;
+ }
+ r->group_by.r = NULL;
+
+ for(size_t d = 0; d < qt->query.used ; d++) {
+ QUERY_METRIC *qm = query_metric(qt, d);
+ QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
+ r->di[d] = rrdmetric_acquired_id_dup(qd->rma);
+ r->dn[d] = rrdmetric_acquired_name_dup(qd->rma);
+ }
+
+ rrd2rrdr_set_timestamps(r);
+ return r;
+ }
+ // v2 query
+
+ // parse all the group-by label keys
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if (qt->request.group_by[g].group_by & RRDR_GROUP_BY_LABEL &&
+ qt->request.group_by[g].group_by_label && *qt->request.group_by[g].group_by_label)
+ qt->group_by[g].used = quoted_strings_splitter(
+ qt->request.group_by[g].group_by_label, qt->group_by[g].label_keys,
+ GROUP_BY_MAX_LABEL_KEYS, group_by_label_is_space);
+
+ if (!qt->group_by[g].used)
+ qt->request.group_by[g].group_by &= ~RRDR_GROUP_BY_LABEL;
+ }
+
+ // make sure there are valid group-by methods
+ bool query_has_percentage_of_instance = false;
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES - 1 ;g++) {
+ if(!(qt->request.group_by[g].group_by & SUPPORTED_GROUP_BY_METHODS))
+ qt->request.group_by[g].group_by = (g == 0) ? RRDR_GROUP_BY_DIMENSION : RRDR_GROUP_BY_NONE;
+
+ if(qt->request.group_by[g].group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ query_has_percentage_of_instance = true;
+ }
+
+ // merge all group-by options to upper levels
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES - 1 ;g++) {
+ if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
+ continue;
+
+ if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_SELECTED) {
+ for (size_t r = g + 1; r < MAX_QUERY_GROUP_BY_PASSES; r++)
+ qt->request.group_by[r].group_by = RRDR_GROUP_BY_NONE;
+ }
+ else {
+ for (size_t r = g + 1; r < MAX_QUERY_GROUP_BY_PASSES; r++) {
+ if (qt->request.group_by[r].group_by == RRDR_GROUP_BY_NONE)
+ continue;
+
+ if (qt->request.group_by[r].group_by != RRDR_GROUP_BY_SELECTED) {
+ if(qt->request.group_by[r].group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ qt->request.group_by[g].group_by |= RRDR_GROUP_BY_INSTANCE;
+ else
+ qt->request.group_by[g].group_by |= qt->request.group_by[r].group_by;
+
+ if(qt->request.group_by[r].group_by & RRDR_GROUP_BY_LABEL) {
+ for (size_t lr = 0; lr < qt->group_by[r].used; lr++) {
+ bool found = false;
+ for (size_t lg = 0; lg < qt->group_by[g].used; lg++) {
+ if (strcmp(qt->group_by[g].label_keys[lg], qt->group_by[r].label_keys[lr]) == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found && qt->group_by[g].used < GROUP_BY_MAX_LABEL_KEYS * MAX_QUERY_GROUP_BY_PASSES)
+ qt->group_by[g].label_keys[qt->group_by[g].used++] = qt->group_by[r].label_keys[lr];
+ }
+ }
+ }
+ }
+ }
+ }
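+
+ // Illustrative only: with group_by[0]=label and group_by[1]=node, the
+ // loop above widens pass 0 to label+node, so the intermediate RRDR of
+ // pass 0 still carries everything pass 1 needs to regroup it; a
+ // "selected" pass short-circuits all later passes, since it already
+ // collapses the query to a single dimension.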
+
+ int added = 0;
+ RRDR *first_r = NULL, *last_r = NULL;
+ BUFFER *key = buffer_create(0, NULL);
+ struct rrdr_group_by_entry *entries = onewayalloc_mallocz(owa, qt->query.used * sizeof(struct rrdr_group_by_entry));
+ DICTIONARY *groups = dictionary_create(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE);
+ DICTIONARY *label_keys = NULL;
+
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ RRDR_GROUP_BY group_by = qt->request.group_by[g].group_by;
+
+ if(group_by == RRDR_GROUP_BY_NONE)
+ break;
+
+ memset(entries, 0, qt->query.used * sizeof(struct rrdr_group_by_entry));
+ dictionary_flush(groups);
+ added = 0;
+
+ size_t hidden_dimensions = 0;
+ bool final_grouping = (g == MAX_QUERY_GROUP_BY_PASSES - 1 || qt->request.group_by[g + 1].group_by == RRDR_GROUP_BY_NONE) ? true : false;
+
+ if (final_grouping && (options & RRDR_OPTION_GROUP_BY_LABELS))
+ label_keys = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, 0);
+
+ QUERY_INSTANCE *last_qi = NULL;
+ size_t priority = 0;
+ time_t update_every_max = 0;
+ for (size_t d = 0; d < qt->query.used; d++) {
+ QUERY_METRIC *qm = query_metric(qt, d);
+ QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
+ QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
+ QUERY_CONTEXT *qc = query_context(qt, qm->link.query_context_id);
+ QUERY_NODE *qn = query_node(qt, qm->link.query_node_id);
+
+ if (qi != last_qi) {
+ last_qi = qi;
+
+ time_t update_every = rrdinstance_acquired_update_every(qi->ria);
+ if (update_every > update_every_max)
+ update_every_max = update_every;
+ }
+
+ priority = qd->priority;
+
+ if(qm->status & RRDR_DIMENSION_HIDDEN)
+ hidden_dimensions++;
+
+ // --------------------------------------------------------------------
+ // generate the group by key
+
+ query_group_by_make_dimension_key(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_instance);
+
+ // lookup the key in the dictionary
+
+ int pos = -1;
+ int *set = dictionary_set(groups, buffer_tostring(key), &pos, sizeof(pos));
+ if (*set == -1) {
+ // the key was just added to the dictionary
+
+ *set = pos = added++;
+
+ // ----------------------------------------------------------------
+ // generate the dimension id
+
+ query_group_by_make_dimension_id(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_instance);
+ entries[pos].id = string_strdupz(buffer_tostring(key));
+
+ // ----------------------------------------------------------------
+ // generate the dimension name
+
+ query_group_by_make_dimension_name(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_instance);
+ entries[pos].name = string_strdupz(buffer_tostring(key));
+
+ // add the rest of the info
+ entries[pos].units = rrdinstance_acquired_units_dup(qi->ria);
+ entries[pos].priority = priority;
+
+ if (label_keys) {
+ entries[pos].dl = dictionary_create_advanced(
+ DICT_OPTION_SINGLE_THREADED | DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE,
+ NULL, sizeof(struct group_by_label_key));
+ dictionary_register_insert_callback(entries[pos].dl, group_by_label_key_insert_cb, label_keys);
+ dictionary_register_delete_callback(entries[pos].dl, group_by_label_key_delete_cb, label_keys);
+ }
+ } else {
+ // the key was already in the dictionary
+ pos = *set;
+ }
+
+ entries[pos].count++;
+
+ if (unlikely(priority < entries[pos].priority))
+ entries[pos].priority = priority;
+
+ if(g > 0)
+ last_r->dgbs[qm->grouped_as.slot] = pos;
+ else
+ qm->grouped_as.first_slot = pos;
+
+ qm->grouped_as.slot = pos;
+ qm->grouped_as.id = entries[pos].id;
+ qm->grouped_as.name = entries[pos].name;
+ qm->grouped_as.units = entries[pos].units;
+
+ // copy the dimension flags decided by the query target
+ // we need this, because if a dimension is explicitly selected
+ // the query target adds to it the non-zero flag
+ qm->status |= RRDR_DIMENSION_GROUPED;
+
+ if(query_has_percentage_of_instance)
+ // when the query has percentage of instance
+ // there will be no hidden dimensions in the final query
+ // so we have to remove the hidden flag from all dimensions
+ entries[pos].od |= qm->status & ~RRDR_DIMENSION_HIDDEN;
+ else
+ entries[pos].od |= qm->status;
+
+ if (entries[pos].dl)
+ rrdlabels_walkthrough_read(rrdinstance_acquired_labels(qi->ria),
+ rrdlabels_traversal_cb_to_group_by_label_key, entries[pos].dl);
+ }
+
+ RRDR *r = rrdr_create(owa, qt, added, qt->window.points);
+ if (!r) {
+ internal_error(true,
+ "QUERY: cannot create group by RRDR for %s, after=%ld, before=%ld, dimensions=%d, points=%zu",
+ qt->id, qt->window.after, qt->window.before, added, qt->window.points);
+ goto cleanup;
+ }
+
+ bool hidden_dimension_on_percentage_of_instance = hidden_dimensions && (group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE);
+
+ // prevent double cleanup in case of error
+ added = 0;
+
+ if(!last_r)
+ first_r = last_r = r;
+ else
+ last_r->group_by.r = r;
+
+ last_r = r;
+
+ rrd2rrdr_set_timestamps(r);
+ r->dp = onewayalloc_callocz(owa, r->d, sizeof(*r->dp));
+ r->dview = onewayalloc_callocz(owa, r->d, sizeof(*r->dview));
+ r->dgbc = onewayalloc_callocz(owa, r->d, sizeof(*r->dgbc));
+ r->gbc = onewayalloc_callocz(owa, r->n * r->d, sizeof(*r->gbc));
+ r->dqp = onewayalloc_callocz(owa, r->d, sizeof(STORAGE_POINT));
+
+ if(hidden_dimension_on_percentage_of_instance)
+ // this is where we are going to group the hidden dimensions
+ r->vh = onewayalloc_mallocz(owa, r->n * r->d * sizeof(*r->vh));
+
+ if(!final_grouping)
+ // this is where we are going to store the slot in the next RRDR
+ // that we are going to group by the dimension of this RRDR
+ r->dgbs = onewayalloc_callocz(owa, r->d, sizeof(*r->dgbs));
+
+ if (label_keys) {
+ r->dl = onewayalloc_callocz(owa, r->d, sizeof(DICTIONARY *));
+ r->label_keys = label_keys;
+ label_keys = NULL;
+ }
+
+ // populate r (dimension options, names, and ids) from the group-by entries
+ // this is required, because group-by may lead to empty dimensions
+ for (size_t d = 0; d < r->d; d++) {
+ r->di[d] = entries[d].id;
+ r->dn[d] = entries[d].name;
+
+ r->od[d] = entries[d].od;
+ r->du[d] = entries[d].units;
+ r->dp[d] = entries[d].priority;
+ r->dgbc[d] = entries[d].count;
+
+ if (r->dl)
+ r->dl[d] = entries[d].dl;
+ }
+
+ // initialize partial trimming
+ r->partial_data_trimming.max_update_every = update_every_max;
+ r->partial_data_trimming.expected_after =
+ (!(qt->window.options & RRDR_OPTION_RETURN_RAW) &&
+ qt->window.before >= qt->window.now - update_every_max) ?
+ qt->window.before - update_every_max :
+ qt->window.before;
+ r->partial_data_trimming.trimmed_after = qt->window.before;
+
+ // make all values empty
+ for (size_t i = 0; i != r->n; i++) {
+ NETDATA_DOUBLE *cn = &r->v[i * r->d];
+ RRDR_VALUE_FLAGS *co = &r->o[i * r->d];
+ NETDATA_DOUBLE *ar = &r->ar[i * r->d];
+ NETDATA_DOUBLE *vh = r->vh ? &r->vh[i * r->d] : NULL;
+
+ for (size_t d = 0; d < r->d; d++) {
+ cn[d] = NAN;
+ ar[d] = 0.0;
+ co[d] = RRDR_VALUE_EMPTY;
+
+ if(vh)
+ vh[d] = NAN;
+ }
+ }
+ }
+
+ if(!first_r || !last_r)
+ goto cleanup;
+
+ r_tmp = rrdr_create(owa, qt, 1, qt->window.points);
+ if (!r_tmp) {
+ internal_error(true,
+ "QUERY: cannot create group by temporary RRDR for %s, after=%ld, before=%ld, dimensions=%d, points=%zu",
+ qt->id, qt->window.after, qt->window.before, 1, qt->window.points);
+ goto cleanup;
+ }
+ rrd2rrdr_set_timestamps(r_tmp);
+ r_tmp->group_by.r = first_r;
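+
+ // At this point the RRDRs appear to be chained via group_by.r roughly as:
+ //
+ //   r_tmp (1 dimension, per-metric staging)
+ //     -> first_r (pass 0 grouping)
+ //     -> ... (one RRDR per additional pass)
+ //     -> last_r (final grouping, returned to the caller)
+ //
+ // rrd2rrdr_group_by_finalize() later walks and frees this chain.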
+
+cleanup:
+ if(!first_r || !last_r || !r_tmp) {
+ if(r_tmp) {
+ r_tmp->group_by.r = NULL;
+ rrdr_free(owa, r_tmp);
+ }
+
+ if(first_r) {
+ RRDR *r = first_r;
+ while (r) {
+ r_tmp = r->group_by.r;
+ r->group_by.r = NULL;
+ rrdr_free(owa, r);
+ r = r_tmp;
+ }
+ }
+
+ if(entries && added) {
+ for (int d = 0; d < added; d++) {
+ string_freez(entries[d].id);
+ string_freez(entries[d].name);
+ string_freez(entries[d].units);
+ dictionary_destroy(entries[d].dl);
+ }
+ }
+ dictionary_destroy(label_keys);
+
+ first_r = last_r = r_tmp = NULL;
+ }
+
+ buffer_free(key);
+ onewayalloc_freez(owa, entries);
+ dictionary_destroy(groups);
+
+ return r_tmp;
+}
+
+static void rrd2rrdr_group_by_add_metric(RRDR *r_dst, size_t d_dst, RRDR *r_tmp, size_t d_tmp,
+ RRDR_GROUP_BY_FUNCTION group_by_aggregate_function,
+ STORAGE_POINT *query_points, size_t pass __maybe_unused) {
+ if(!r_tmp || r_dst == r_tmp || !(r_tmp->od[d_tmp] & RRDR_DIMENSION_QUERIED))
+ return;
+
+ internal_fatal(r_dst->n != r_tmp->n, "QUERY: group-by source and destination do not have the same number of rows");
+ internal_fatal(d_dst >= r_dst->d, "QUERY: group-by destination dimension number exceeds destination RRDR size");
+ internal_fatal(d_tmp >= r_tmp->d, "QUERY: group-by source dimension number exceeds source RRDR size");
+ internal_fatal(!r_dst->dqp, "QUERY: group-by destination is not properly prepared (missing dqp array)");
+ internal_fatal(!r_dst->gbc, "QUERY: group-by destination is not properly prepared (missing gbc array)");
+
+ bool hidden_dimension_on_percentage_of_instance = (r_tmp->od[d_tmp] & RRDR_DIMENSION_HIDDEN) && r_dst->vh;
+
+ if(!hidden_dimension_on_percentage_of_instance) {
+ r_dst->od[d_dst] |= r_tmp->od[d_tmp];
+ storage_point_merge_to(r_dst->dqp[d_dst], *query_points);
+ }
+
+ // do the group_by
+ for(size_t i = 0; i != rrdr_rows(r_tmp) ; i++) {
+
+ size_t idx_tmp = i * r_tmp->d + d_tmp;
+ NETDATA_DOUBLE n_tmp = r_tmp->v[ idx_tmp ];
+ RRDR_VALUE_FLAGS o_tmp = r_tmp->o[ idx_tmp ];
+ NETDATA_DOUBLE ar_tmp = r_tmp->ar[ idx_tmp ];
+
+ if(o_tmp & RRDR_VALUE_EMPTY)
+ continue;
+
+ size_t idx_dst = i * r_dst->d + d_dst;
+ NETDATA_DOUBLE *cn = (hidden_dimension_on_percentage_of_instance) ? &r_dst->vh[ idx_dst ] : &r_dst->v[ idx_dst ];
+ RRDR_VALUE_FLAGS *co = &r_dst->o[ idx_dst ];
+ NETDATA_DOUBLE *ar = &r_dst->ar[ idx_dst ];
+ uint32_t *gbc = &r_dst->gbc[ idx_dst ];
+
+ switch(group_by_aggregate_function) {
+ default:
+ case RRDR_GROUP_BY_FUNCTION_AVERAGE:
+ case RRDR_GROUP_BY_FUNCTION_SUM:
+ if(isnan(*cn))
+ *cn = n_tmp;
+ else
+ *cn += n_tmp;
+ break;
+
+ case RRDR_GROUP_BY_FUNCTION_MIN:
+ if(isnan(*cn) || n_tmp < *cn)
+ *cn = n_tmp;
+ break;
+
+ case RRDR_GROUP_BY_FUNCTION_MAX:
+ if(isnan(*cn) || n_tmp > *cn)
+ *cn = n_tmp;
+ break;
+ }
+
+ if(!hidden_dimension_on_percentage_of_instance) {
+ *co &= ~RRDR_VALUE_EMPTY;
+ *co |= (o_tmp & (RRDR_VALUE_RESET | RRDR_VALUE_PARTIAL));
+ *ar += ar_tmp;
+ (*gbc)++;
+ }
+ }
+}
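+
+// Note that AVERAGE and SUM share the accumulation above: AVERAGE is
+// implemented as a plain sum here, and rrd2rrdr_group_by_finalize()
+// later divides by the per-slot contribution counter (gbc).
+// Illustrative numbers: three grouped dimensions valued 1, 2 and 3
+// give *cn = 6 and gbc = 3, which finalize turns into 6 / 3 = 2.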
+
+static void rrdr2rrdr_group_by_partial_trimming(RRDR *r) {
+ time_t trimmable_after = r->partial_data_trimming.expected_after;
+
+ // find the point just before the trimmable ones
+ ssize_t i = (ssize_t)r->n - 1;
+ for( ; i >= 0 ;i--) {
+ if (r->t[i] < trimmable_after)
+ break;
+ }
+
+ if(unlikely(i < 0))
+ return;
+
+ size_t last_row_gbc = 0;
+ for (; i < (ssize_t)r->n; i++) {
+ size_t row_gbc = 0;
+ for (size_t d = 0; d < r->d; d++) {
+ if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
+ continue;
+
+ row_gbc += r->gbc[ i * r->d + d ];
+ }
+
+ if (unlikely(r->t[i] >= trimmable_after && row_gbc < last_row_gbc)) {
+ // discard the rest of the points
+ r->partial_data_trimming.trimmed_after = r->t[i];
+ r->rows = i;
+ break;
+ }
+ else
+ last_row_gbc = row_gbc;
+ }
+}
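+
+// Illustrative only: with max_update_every = 10s and a query ending at
+// "now", the last view point may not yet include samples from every
+// slowly-collected instance; expected_after is therefore pulled back by
+// one max_update_every, and trailing rows whose contribution count
+// (gbc) drops compared to the previous row are cut off.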
+
+static void rrdr2rrdr_group_by_calculate_percentage_of_instance(RRDR *r) {
+ if(!r->vh)
+ return;
+
+ for(size_t i = 0; i < r->n ;i++) {
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *ch = &r->vh[ i * r->d ];
+
+ for(size_t d = 0; d < r->d ;d++) {
+ NETDATA_DOUBLE n = cn[d];
+ NETDATA_DOUBLE h = ch[d];
+
+ if(isnan(n))
+ cn[d] = 0.0;
+
+ else if(isnan(h))
+ cn[d] = 100.0;
+
+ else
+ cn[d] = n * 100.0 / (n + h);
+ }
+ }
+}
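+
+// A worked example with illustrative numbers: if a queried dimension has
+// n = 30 and the hidden dimensions of the same instance sum to h = 70,
+// it becomes 30 * 100 / (30 + 70) = 30, i.e. 30% of the instance total;
+// per the isnan() branches above, no visible value yields 0.0 and no
+// hidden value yields 100.0.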
+
+static void rrd2rrdr_convert_to_percentage(RRDR *r) {
+ size_t global_min_max_values = 0;
+ NETDATA_DOUBLE global_min = NAN, global_max = NAN;
+
+ for(size_t i = 0; i != r->n ;i++) {
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+
+ NETDATA_DOUBLE total = 0;
+ for (size_t d = 0; d < r->d; d++) {
+ if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
+ continue;
+
+ if(co[d] & RRDR_VALUE_EMPTY)
+ continue;
+
+ total += cn[d];
+ }
+
+ if(total == 0.0)
+ total = 1.0;
+
+ for (size_t d = 0; d < r->d; d++) {
+ if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
+ continue;
+
+ if(co[d] & RRDR_VALUE_EMPTY)
+ continue;
+
+ NETDATA_DOUBLE n = cn[d];
+ n = cn[d] = n * 100.0 / total;
+
+ if(unlikely(!global_min_max_values++))
+ global_min = global_max = n;
+ else {
+ if(n < global_min)
+ global_min = n;
+ if(n > global_max)
+ global_max = n;
+ }
+ }
+ }
+
+ r->view.min = global_min;
+ r->view.max = global_max;
+
+ if(!r->dview)
+ // v1 query
+ return;
+
+ // v2 query
+
+ for (size_t d = 0; d < r->d; d++) {
+ if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
+ continue;
+
+ size_t count = 0;
+ NETDATA_DOUBLE min = 0.0, max = 0.0, sum = 0.0, ars = 0.0;
+ for(size_t i = 0; i != r->rows ;i++) { // we use r->rows to respect trimming
+ size_t idx = i * r->d + d;
+
+ RRDR_VALUE_FLAGS o = r->o[ idx ];
+
+ if (o & RRDR_VALUE_EMPTY)
+ continue;
+
+ NETDATA_DOUBLE ar = r->ar[ idx ];
+ ars += ar;
+
+ NETDATA_DOUBLE n = r->v[ idx ];
+ sum += n;
+
+ if(!count++)
+ min = max = n;
+ else {
+ if(n < min)
+ min = n;
+ if(n > max)
+ max = n;
+ }
+ }
+
+ r->dview[d] = (STORAGE_POINT) {
+ .sum = sum,
+ .count = count,
+ .min = min,
+ .max = max,
+ .anomaly_count = (size_t)(ars * (NETDATA_DOUBLE)count),
+ };
+ }
+}
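+
+// A worked example with illustrative numbers: a row with two queried
+// dimensions valued 2 and 6 has total = 8, so they become 25 and 75
+// (percent of the row total); when a row sums to zero, total is forced
+// to 1.0 to avoid a division by zero.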
+
+static RRDR *rrd2rrdr_group_by_finalize(RRDR *r_tmp) {
+ QUERY_TARGET *qt = r_tmp->internal.qt;
+ RRDR_OPTIONS options = qt->window.options;
+
+ if(!r_tmp->group_by.r) {
+ // v1 query
+ if(options & RRDR_OPTION_PERCENTAGE)
+ rrd2rrdr_convert_to_percentage(r_tmp);
+ return r_tmp;
+ }
+ // v2 query
+
+ // do the additional passes on RRDRs
+ RRDR *last_r = r_tmp->group_by.r;
+ rrdr2rrdr_group_by_calculate_percentage_of_instance(last_r);
+
+ RRDR *r = last_r->group_by.r;
+ size_t pass = 0;
+ while(r) {
+ pass++;
+ for(size_t d = 0; d < last_r->d ;d++) {
+ rrd2rrdr_group_by_add_metric(r, last_r->dgbs[d], last_r, d,
+ qt->request.group_by[pass].aggregation,
+ &last_r->dqp[d], pass);
+ }
+ rrdr2rrdr_group_by_calculate_percentage_of_instance(r);
+
+ last_r = r;
+ r = last_r->group_by.r;
+ }
+
+ // free all RRDRs except the last one
+ r = r_tmp;
+ while(r != last_r) {
+ r_tmp = r->group_by.r;
+ r->group_by.r = NULL;
+ rrdr_free(r->internal.owa, r);
+ r = r_tmp;
+ }
+ r = last_r;
+
+ // find the final aggregation
+ RRDR_GROUP_BY_FUNCTION aggregation = qt->request.group_by[0].aggregation;
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++)
+ if(qt->request.group_by[g].group_by != RRDR_GROUP_BY_NONE)
+ aggregation = qt->request.group_by[g].aggregation;
+
+ if(!(options & RRDR_OPTION_RETURN_RAW) && r->partial_data_trimming.expected_after < qt->window.before)
+ rrdr2rrdr_group_by_partial_trimming(r);
+
+ // apply averaging, remove RRDR_VALUE_EMPTY, find the non-zero dimensions, min and max
+ size_t global_min_max_values = 0;
+ size_t dimensions_nonzero = 0;
+ NETDATA_DOUBLE global_min = NAN, global_max = NAN;
+ for (size_t d = 0; d < r->d; d++) {
+ if (unlikely(!(r->od[d] & RRDR_DIMENSION_QUERIED)))
+ continue;
+
+ size_t points_nonzero = 0;
+ NETDATA_DOUBLE min = 0, max = 0, sum = 0, ars = 0;
+ size_t count = 0;
+
+ for(size_t i = 0; i != r->n ;i++) {
+ size_t idx = i * r->d + d;
+
+ NETDATA_DOUBLE *cn = &r->v[ idx ];
+ RRDR_VALUE_FLAGS *co = &r->o[ idx ];
+ NETDATA_DOUBLE *ar = &r->ar[ idx ];
+ uint32_t gbc = r->gbc[ idx ];
+
+ if(likely(gbc)) {
+ *co &= ~RRDR_VALUE_EMPTY;
+
+ if(gbc != r->dgbc[d])
+ *co |= RRDR_VALUE_PARTIAL;
+
+ NETDATA_DOUBLE n;
+
+ sum += *cn;
+ ars += *ar;
+
+ if(aggregation == RRDR_GROUP_BY_FUNCTION_AVERAGE && !query_target_aggregatable(qt))
+ n = (*cn /= gbc);
+ else
+ n = *cn;
+
+ if(!query_target_aggregatable(qt))
+ *ar /= gbc;
+
+ if(islessgreater(n, 0.0))
+ points_nonzero++;
+
+ if(unlikely(!count))
+ min = max = n;
+ else {
+ if(n < min)
+ min = n;
+
+ if(n > max)
+ max = n;
+ }
+
+ if(unlikely(!global_min_max_values++))
+ global_min = global_max = n;
+ else {
+ if(n < global_min)
+ global_min = n;
+
+ if(n > global_max)
+ global_max = n;
+ }
+
+ count += gbc;
+ }
+ }
+
+ if(points_nonzero) {
+ r->od[d] |= RRDR_DIMENSION_NONZERO;
+ dimensions_nonzero++;
+ }
+
+ r->dview[d] = (STORAGE_POINT) {
+ .sum = sum,
+ .count = count,
+ .min = min,
+ .max = max,
+ .anomaly_count = (size_t)(ars * RRDR_DVIEW_ANOMALY_COUNT_MULTIPLIER / 100.0),
+ };
+ }
+
+ r->view.min = global_min;
+ r->view.max = global_max;
+
+ if(!dimensions_nonzero && (qt->window.options & RRDR_OPTION_NONZERO)) {
+ // all dimensions are zero
+ // remove the nonzero option
+ qt->window.options &= ~RRDR_OPTION_NONZERO;
+ }
+
+ if(options & RRDR_OPTION_PERCENTAGE && !(options & RRDR_OPTION_RETURN_RAW))
+ rrd2rrdr_convert_to_percentage(r);
+
+ // update query instance counts in query host and query context
+ {
+ size_t h = 0, c = 0, i = 0;
+ for(; h < qt->nodes.used ; h++) {
+ QUERY_NODE *qn = &qt->nodes.array[h];
+
+ for(; c < qt->contexts.used ;c++) {
+ QUERY_CONTEXT *qc = &qt->contexts.array[c];
+
+ if(!rrdcontext_acquired_belongs_to_host(qc->rca, qn->rrdhost))
+ break;
+
+ for(; i < qt->instances.used ;i++) {
+ QUERY_INSTANCE *qi = &qt->instances.array[i];
+
+ if(!rrdinstance_acquired_belongs_to_context(qi->ria, qc->rca))
+ break;
+
+ if(qi->metrics.queried) {
+ qc->instances.queried++;
+ qn->instances.queried++;
+ }
+ else if(qi->metrics.failed) {
+ qc->instances.failed++;
+ qn->instances.failed++;
+ }
+ }
+ }
+ }
+ }
+
+ return r;
+}
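For orientation, the core of the finalize pass above can be sketched standalone: for the `average` aggregation it divides each accumulated cell by its group-by count (`gbc`), clears the empty flag, and tracks the non-zero points plus the global min/max. The sketch below uses plain arrays instead of the RRDR structures; all names are illustrative, not netdata APIs.

```c
#include <math.h>
#include <stdio.h>
#include <stddef.h>

#define POINTS 4
#define DIMS   2

int main(void) {
    /* v[i][d]: sums accumulated by the group-by pass;
     * gbc[i][d]: how many source values went into each cell */
    double   v[POINTS][DIMS]   = {{2, 0}, {4, 0}, {6, 0}, {8, 0}};
    unsigned gbc[POINTS][DIMS] = {{2, 0}, {2, 0}, {2, 0}, {2, 0}};

    double global_min = NAN, global_max = NAN;
    size_t global_seen = 0;

    for(size_t d = 0; d < DIMS ; d++) {
        size_t points_nonzero = 0;

        for(size_t i = 0; i < POINTS ; i++) {
            if(!gbc[i][d])
                continue;                      /* empty cell, stays EMPTY */

            double n = v[i][d] / gbc[i][d];    /* average = sum / count */
            v[i][d] = n;

            if(islessgreater(n, 0.0))
                points_nonzero++;

            if(!global_seen++)
                global_min = global_max = n;
            else {
                if(n < global_min) global_min = n;
                if(n > global_max) global_max = n;
            }
        }

        printf("dimension %zu: %zu non-zero points\n", d, points_nonzero);
    }

    printf("view: min=%g max=%g\n", global_min, global_max);
    return 0;
}
```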
+
+// ----------------------------------------------------------------------------
+// query entry point
+
RRDR *rrd2rrdr_legacy(
ONEWAYALLOC *owa,
RRDSET *st, size_t points, time_t after, time_t before,
- RRDR_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
- const char *group_options, time_t timeout, size_t tier, QUERY_SOURCE query_source,
+ RRDR_TIME_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
+ const char *group_options, time_t timeout_ms, size_t tier, QUERY_SOURCE query_source,
STORAGE_PRIORITY priority) {
QUERY_TARGET_REQUEST qtr = {
+ .version = 1,
.st = st,
.points = points,
.after = after,
.before = before,
- .group_method = group_method,
+ .time_group_method = group_method,
.resampling_time = resampling_time,
.options = options,
.dimensions = dimensions,
- .group_options = group_options,
- .timeout = timeout,
+ .time_group_options = group_options,
+ .timeout_ms = timeout_ms,
.tier = tier,
.query_source = query_source,
.priority = priority,
};
- return rrd2rrdr(owa, query_target_create(&qtr));
+ QUERY_TARGET *qt = query_target_create(&qtr);
+ RRDR *r = rrd2rrdr(owa, qt);
+ if(!r) {
+ query_target_release(qt);
+ return NULL;
+ }
+
+ r->internal.release_with_rrdr_qt = qt;
+ return r;
}
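The rewritten `rrd2rrdr_legacy()` also changes ownership: when the query fails, the `QUERY_TARGET` is released immediately; otherwise it is attached to the result via `release_with_rrdr_qt` so it is freed together with the RRDR. A minimal sketch of that pattern with stand-in types (none of these names are netdata APIs):

```c
#include <stdlib.h>

typedef struct target { int unused; } target_t;
typedef struct result { target_t *release_with_result; } result_t;

static target_t *target_create(void)         { return calloc(1, sizeof(target_t)); }
static void      target_release(target_t *t) { free(t); }
static result_t *run_query(target_t *t)      { (void)t; return calloc(1, sizeof(result_t)); }

static result_t *query_legacy(void) {
    target_t *qt = target_create();
    result_t *r  = run_query(qt);

    if(!r) {
        target_release(qt);          /* failure: nobody else owns qt */
        return NULL;
    }

    r->release_with_result = qt;     /* success: qt is freed with r */
    return r;
}

static void result_free(result_t *r) {
    if(!r) return;
    target_release(r->release_with_result);
    free(r);
}

int main(void) {
    result_free(query_legacy());
    return 0;
}
```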
RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
- if(!qt)
+ if(!qt || !owa)
return NULL;
- if(!owa) {
- query_target_release(qt);
- return NULL;
- }
-
// qt.window members are the WANTED ones.
// qt.request members are the REQUESTED ones.
- RRDR *r = rrdr_create(owa, qt);
- if(unlikely(!r)) {
- internal_error(true, "QUERY: cannot create RRDR for %s, after=%ld, before=%ld, points=%zu",
- qt->id, qt->window.after, qt->window.before, qt->window.points);
+ RRDR *r_tmp = rrd2rrdr_group_by_initialize(owa, qt);
+ if(!r_tmp)
return NULL;
- }
- if(unlikely(!r->d || !qt->window.points)) {
- internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%ld, before=%ld, points=%zu",
- qt->id, qt->window.after, qt->window.before, qt->window.points);
- return r;
- }
+ // the RRDR we group-by at
+ RRDR *r = (r_tmp->group_by.r) ? r_tmp->group_by.r : r_tmp;
+
+ // the final RRDR to return to callers
+ RRDR *last_r = r_tmp;
+ while(last_r->group_by.r)
+ last_r = last_r->group_by.r;
if(qt->window.relative)
- r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
+ last_r->view.flags |= RRDR_RESULT_FLAG_RELATIVE;
else
- r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
-
- // -------------------------------------------------------------------------
- // initialize RRDR
-
- r->group = qt->window.group;
- r->update_every = (int) (qt->window.group * qt->window.query_granularity);
- r->before = qt->window.before;
- r->after = qt->window.after;
- r->internal.points_wanted = qt->window.points;
- r->internal.resampling_group = qt->window.resampling_group;
- r->internal.resampling_divisor = qt->window.resampling_divisor;
- r->internal.query_options = qt->window.options;
+ last_r->view.flags |= RRDR_RESULT_FLAG_ABSOLUTE;
// -------------------------------------------------------------------------
// assign the processor functions
- rrdr_set_grouping_function(r, qt->window.group_method);
+ rrdr_set_grouping_function(r_tmp, qt->window.time_group_method);
// allocate any memory required by the grouping method
- r->internal.grouping_create(r, qt->window.group_options);
+ r_tmp->time_grouping.create(r_tmp, qt->window.time_group_options);
// -------------------------------------------------------------------------
// do the work for each dimension
@@ -2239,122 +3529,207 @@ RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
size_t max_rows = 0;
long dimensions_used = 0, dimensions_nonzero = 0;
- struct timeval query_start_time;
- struct timeval query_current_time;
- if (qt->request.timeout)
- now_realtime_timeval(&query_start_time);
-
size_t last_db_points_read = 0;
size_t last_result_points_generated = 0;
- QUERY_ENGINE_OPS **ops = onewayalloc_callocz(r->internal.owa, qt->query.used, sizeof(QUERY_ENGINE_OPS *));
+ internal_fatal(released_ops, "QUERY: released_ops should be NULL when the query starts");
- size_t capacity = libuv_worker_threads * 2;
+ QUERY_ENGINE_OPS **ops = NULL;
+ if(qt->query.used)
+ ops = onewayalloc_callocz(owa, qt->query.used, sizeof(QUERY_ENGINE_OPS *));
+
+ size_t capacity = libuv_worker_threads * 10;
size_t max_queries_to_prepare = (qt->query.used > (capacity - 1)) ? (capacity - 1) : qt->query.used;
size_t queries_prepared = 0;
while(queries_prepared < max_queries_to_prepare) {
// preload another query
- ops[queries_prepared] = rrd2rrdr_query_prep(r, queries_prepared);
+ ops[queries_prepared] = rrd2rrdr_query_ops_prep(r_tmp, queries_prepared);
queries_prepared++;
}
- for(size_t c = 0, max = qt->query.used; c < max ; c++) {
+ QUERY_NODE *last_qn = NULL;
+ usec_t last_ut = now_monotonic_usec();
+ usec_t last_qn_ut = last_ut;
+
+ for(size_t d = 0; d < qt->query.used ; d++) {
+ QUERY_METRIC *qm = query_metric(qt, d);
+ QUERY_DIMENSION *qd = query_dimension(qt, qm->link.query_dimension_id);
+ QUERY_INSTANCE *qi = query_instance(qt, qm->link.query_instance_id);
+ QUERY_CONTEXT *qc = query_context(qt, qm->link.query_context_id);
+ QUERY_NODE *qn = query_node(qt, qm->link.query_node_id);
- if(queries_prepared < max) {
+ usec_t now_ut = last_ut;
+ if(qn != last_qn) {
+ if(last_qn)
+ last_qn->duration_ut = now_ut - last_qn_ut;
+
+ last_qn = qn;
+ last_qn_ut = now_ut;
+ }
+
+ if(queries_prepared < qt->query.used) {
// preload another query
- ops[queries_prepared] = rrd2rrdr_query_prep(r, queries_prepared);
+ ops[queries_prepared] = rrd2rrdr_query_ops_prep(r_tmp, queries_prepared);
queries_prepared++;
}
+ size_t dim_in_rrdr_tmp = (r_tmp != r) ? 0 : d;
+
// set the query target dimension options to rrdr
- r->od[c] = qt->query.array[c].dimension.options;
+ r_tmp->od[dim_in_rrdr_tmp] = qm->status;
// reset the grouping for the new dimension
- r->internal.grouping_reset(r);
+ r_tmp->time_grouping.reset(r_tmp);
- if(ops[c]) {
- r->od[c] |= RRDR_DIMENSION_QUERIED;
- rrd2rrdr_query_execute(r, c, ops[c]);
+ if(ops[d]) {
+ rrd2rrdr_query_execute(r_tmp, dim_in_rrdr_tmp, ops[d]);
+ r_tmp->od[dim_in_rrdr_tmp] |= RRDR_DIMENSION_QUERIED;
+
+ now_ut = now_monotonic_usec();
+ qm->duration_ut = now_ut - last_ut;
+ last_ut = now_ut;
+
+ if(r_tmp != r) {
+ // copy back whatever got updated from the temporary r
+
+ // the query updates RRDR_DIMENSION_NONZERO
+ qm->status = r_tmp->od[dim_in_rrdr_tmp];
+
+ // the query updates these
+ r->view.min = r_tmp->view.min;
+ r->view.max = r_tmp->view.max;
+ r->view.after = r_tmp->view.after;
+ r->view.before = r_tmp->view.before;
+ r->rows = r_tmp->rows;
+
+ rrd2rrdr_group_by_add_metric(r, qm->grouped_as.first_slot, r_tmp, dim_in_rrdr_tmp,
+ qt->request.group_by[0].aggregation, &qm->query_points, 0);
+ }
+
+ rrd2rrdr_query_ops_release(ops[d]); // reuse this ops allocation
+ ops[d] = NULL;
+
+ qi->metrics.queried++;
+ qc->metrics.queried++;
+ qn->metrics.queried++;
+
+ qd->status |= QUERY_STATUS_QUERIED;
+ qm->status |= RRDR_DIMENSION_QUERIED;
+
+ if(qt->request.version >= 2) {
+ // we need to make the query points positive now
+ // since we will aggregate them across multiple dimensions
+ storage_point_make_positive(qm->query_points);
+ storage_point_merge_to(qi->query_points, qm->query_points);
+ storage_point_merge_to(qc->query_points, qm->query_points);
+ storage_point_merge_to(qn->query_points, qm->query_points);
+ storage_point_merge_to(qt->query_points, qm->query_points);
+ }
}
- else
+ else {
+ qi->metrics.failed++;
+ qc->metrics.failed++;
+ qn->metrics.failed++;
+
+ qd->status |= QUERY_STATUS_FAILED;
+ qm->status |= RRDR_DIMENSION_FAILED;
+
continue;
+ }
global_statistics_rrdr_query_completed(
1,
- r->internal.db_points_read - last_db_points_read,
- r->internal.result_points_generated - last_result_points_generated,
+ r_tmp->stats.db_points_read - last_db_points_read,
+ r_tmp->stats.result_points_generated - last_result_points_generated,
qt->request.query_source);
- last_db_points_read = r->internal.db_points_read;
- last_result_points_generated = r->internal.result_points_generated;
-
- if (qt->request.timeout)
- now_realtime_timeval(&query_current_time);
+ last_db_points_read = r_tmp->stats.db_points_read;
+ last_result_points_generated = r_tmp->stats.result_points_generated;
- if(r->od[c] & RRDR_DIMENSION_NONZERO)
+ if(qm->status & RRDR_DIMENSION_NONZERO)
dimensions_nonzero++;
// verify all dimensions are aligned
if(unlikely(!dimensions_used)) {
- min_before = r->before;
- max_after = r->after;
+ min_before = r->view.before;
+ max_after = r->view.after;
max_rows = r->rows;
}
else {
- if(r->after != max_after) {
+ if(r->view.after != max_after) {
internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- string2str(qt->query.array[c].dimension.id), (size_t)max_after, string2str(qt->query.array[c].dimension.name), (size_t)r->after);
+ rrdinstance_acquired_id(qi->ria), (size_t)max_after, rrdmetric_acquired_id(qd->rma), (size_t)r->view.after);
- r->after = (r->after > max_after) ? r->after : max_after;
+ r->view.after = (r->view.after > max_after) ? r->view.after : max_after;
}
- if(r->before != min_before) {
+ if(r->view.before != min_before) {
internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- string2str(qt->query.array[c].dimension.id), (size_t)min_before, string2str(qt->query.array[c].dimension.name), (size_t)r->before);
+ rrdinstance_acquired_id(qi->ria), (size_t)min_before, rrdmetric_acquired_id(qd->rma), (size_t)r->view.before);
- r->before = (r->before < min_before) ? r->before : min_before;
+ r->view.before = (r->view.before < min_before) ? r->view.before : min_before;
}
if(r->rows != max_rows) {
internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- string2str(qt->query.array[c].dimension.id), (size_t)max_rows, string2str(qt->query.array[c].dimension.name), (size_t)r->rows);
+ rrdinstance_acquired_id(qi->ria), (size_t)max_rows, rrdmetric_acquired_id(qd->rma), (size_t)r->rows);
r->rows = (r->rows > max_rows) ? r->rows : max_rows;
}
}
dimensions_used++;
- if (qt->request.timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > (NETDATA_DOUBLE)qt->request.timeout) {
+
+ bool cancel = false;
+ if (qt->request.interrupt_callback && qt->request.interrupt_callback(qt->request.interrupt_callback_data)) {
+ cancel = true;
+ log_access("QUERY INTERRUPTED");
+ }
+
+ if (qt->request.timeout_ms && ((NETDATA_DOUBLE)(now_ut - qt->timings.received_ut) / 1000.0) > (NETDATA_DOUBLE)qt->request.timeout_ms) {
+ cancel = true;
log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %lld ms)",
- (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, (long long)qt->request.timeout);
- r->result_options |= RRDR_RESULT_OPTION_CANCEL;
+ (NETDATA_DOUBLE)(now_ut - qt->timings.received_ut) / 1000.0, (long long)qt->request.timeout_ms);
+ }
- for(size_t i = c + 1; i < queries_prepared ; i++) {
- if(ops[i])
+ if(cancel) {
+ r->view.flags |= RRDR_RESULT_FLAG_CANCEL;
+
+ for(size_t i = d + 1; i < queries_prepared ; i++) {
+ if(ops[i]) {
query_planer_finalize_remaining_plans(ops[i]);
+ rrd2rrdr_query_ops_release(ops[i]);
+ ops[i] = NULL;
+ }
}
break;
}
}
+ // free all resources used by the grouping method
+ r_tmp->time_grouping.free(r_tmp);
+
+ // get the final RRDR to send to the caller
+ r = rrd2rrdr_group_by_finalize(r_tmp);
+
#ifdef NETDATA_INTERNAL_CHECKS
- if (dimensions_used) {
+ if (dimensions_used && !(r->view.flags & RRDR_RESULT_FLAG_CANCEL)) {
if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
qt->window.after, qt->request.after, qt->window.before, qt->request.before,
qt->request.points, qt->window.points, /*after_slot, before_slot,*/
r->internal.log);
if(r->rows != qt->window.points)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
qt->window.after, qt->request.after, qt->window.before, qt->request.before,
qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"got 'points' is not wanted 'points'");
- if(qt->window.aligned && (r->before % (qt->window.group * qt->window.query_granularity)) != 0)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
- qt->window.after, qt->request.after, qt->window.before,qt->request.before,
+ if(qt->window.aligned && (r->view.before % query_view_update_every(qt)) != 0)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"'before' is not aligned but alignment is required");
@@ -2362,21 +3737,21 @@ RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
//if(qt->window.aligned && (r->after % group) != 0)
// rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group, qt->window.after, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
- if(r->before != qt->window.before)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ if(r->view.before != qt->window.before)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
qt->window.after, qt->request.after, qt->window.before, qt->request.before,
qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"chart is not aligned to requested 'before'");
- if(r->before != qt->window.before)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ if(r->view.before != qt->window.before)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
qt->window.after, qt->request.after, qt->window.before, qt->request.before,
qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"got 'before' is not wanted 'before'");
// reported 'after' varies, depending on group
- if(r->after != qt->window.after)
- rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ if(r->view.after != qt->window.after)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.time_group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
qt->window.after, qt->request.after, qt->window.before, qt->request.before,
qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"got 'after' is not wanted 'after'");
@@ -2384,26 +3759,21 @@ RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
}
#endif
- // free all resources used by the grouping method
- r->internal.grouping_free(r);
+ // free the query pipelining ops
+ for(size_t d = 0; d < qt->query.used ; d++) {
+ rrd2rrdr_query_ops_release(ops[d]);
+ ops[d] = NULL;
+ }
+ rrd2rrdr_query_ops_freeall(r);
+ internal_fatal(released_ops, "QUERY: released_ops should be NULL when the query ends");
- if(likely(dimensions_used)) {
+ onewayalloc_freez(owa, ops);
+
+ if(likely(dimensions_used && (qt->window.options & RRDR_OPTION_NONZERO) && !dimensions_nonzero))
// when all the dimensions are zero, we should return all of them
- if (unlikely((qt->window.options & RRDR_OPTION_NONZERO) && !dimensions_nonzero &&
- !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
- // all the dimensions are zero
- // mark them as NONZERO to send them all
- for (size_t c = 0, max = qt->query.used; c < max; c++) {
- if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- if (unlikely(!(r->od[c] & RRDR_DIMENSION_QUERIED))) continue;
- r->od[c] |= RRDR_DIMENSION_NONZERO;
- }
- }
+ qt->window.options &= ~RRDR_OPTION_NONZERO;
- return r;
- }
+ qt->timings.executed_ut = now_monotonic_usec();
- // we couldn't query any dimension
- rrdr_free(owa, r);
- return NULL;
+ return r;
}
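Note how the main loop now keeps a window of prepared queries ahead of the one being executed (`capacity = libuv_worker_threads * 10`, up from `* 2`), so storage engines can prefetch while earlier results are consumed. A simplified sketch of that prepare-ahead scheme, with hypothetical `ops_*` stubs standing in for `rrd2rrdr_query_ops_prep()`, `rrd2rrdr_query_execute()` and `rrd2rrdr_query_ops_release()`:

```c
#include <stdio.h>
#include <stdlib.h>

typedef struct ops { size_t idx; } ops_t;

static ops_t *ops_prep(size_t idx)  { ops_t *o = malloc(sizeof(*o)); o->idx = idx; return o; }
static void   ops_exec(ops_t *o)    { printf("executing query %zu\n", o->idx); }
static void   ops_release(ops_t *o) { free(o); }

static void run_pipelined(size_t used, size_t capacity) {
    ops_t **ops = calloc(used, sizeof(ops_t *));
    size_t prepared = 0;
    size_t ahead = (used > capacity - 1) ? capacity - 1 : used;

    while(prepared < ahead) {            /* fill the initial window */
        ops[prepared] = ops_prep(prepared);
        prepared++;
    }

    for(size_t d = 0; d < used ; d++) {
        if(prepared < used) {            /* keep the window full */
            ops[prepared] = ops_prep(prepared);
            prepared++;
        }

        if(!ops[d]) continue;            /* preparation failed */

        ops_exec(ops[d]);
        ops_release(ops[d]);
        ops[d] = NULL;
    }

    free(ops);
}

int main(void) {
    run_pipelined(8, 4);                 /* 8 queries, window of 3 */
    return 0;
}
```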
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
index ebad5a1f8..e6fdcfbe4 100644
--- a/web/api/queries/query.h
+++ b/web/api/queries/query.h
@@ -7,7 +7,7 @@
extern "C" {
#endif
-typedef enum rrdr_grouping {
+typedef enum rrdr_time_grouping {
RRDR_GROUPING_UNDEFINED = 0,
RRDR_GROUPING_AVERAGE,
RRDR_GROUPING_MIN,
@@ -17,7 +17,7 @@ typedef enum rrdr_grouping {
RRDR_GROUPING_TRIMMED_MEAN1,
RRDR_GROUPING_TRIMMED_MEAN2,
RRDR_GROUPING_TRIMMED_MEAN3,
- RRDR_GROUPING_TRIMMED_MEAN5,
+ RRDR_GROUPING_TRIMMED_MEAN,
RRDR_GROUPING_TRIMMED_MEAN10,
RRDR_GROUPING_TRIMMED_MEAN15,
RRDR_GROUPING_TRIMMED_MEAN20,
@@ -36,7 +36,7 @@ typedef enum rrdr_grouping {
RRDR_GROUPING_PERCENTILE75,
RRDR_GROUPING_PERCENTILE80,
RRDR_GROUPING_PERCENTILE90,
- RRDR_GROUPING_PERCENTILE95,
+ RRDR_GROUPING_PERCENTILE,
RRDR_GROUPING_PERCENTILE97,
RRDR_GROUPING_PERCENTILE98,
RRDR_GROUPING_PERCENTILE99,
@@ -45,12 +45,50 @@ typedef enum rrdr_grouping {
RRDR_GROUPING_SES,
RRDR_GROUPING_DES,
RRDR_GROUPING_COUNTIF,
-} RRDR_GROUPING;
+} RRDR_TIME_GROUPING;
-const char *group_method2string(RRDR_GROUPING group);
-void web_client_api_v1_init_grouping(void);
-RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);
-const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group);
+const char *time_grouping_method2string(RRDR_TIME_GROUPING group);
+void time_grouping_init(void);
+RRDR_TIME_GROUPING time_grouping_parse(const char *name, RRDR_TIME_GROUPING def);
+const char *time_grouping_tostring(RRDR_TIME_GROUPING group);
+
+typedef enum rrdr_group_by {
+ RRDR_GROUP_BY_NONE = 0,
+ RRDR_GROUP_BY_SELECTED = (1 << 0),
+ RRDR_GROUP_BY_DIMENSION = (1 << 1),
+ RRDR_GROUP_BY_INSTANCE = (1 << 2),
+ RRDR_GROUP_BY_LABEL = (1 << 3),
+ RRDR_GROUP_BY_NODE = (1 << 4),
+ RRDR_GROUP_BY_CONTEXT = (1 << 5),
+ RRDR_GROUP_BY_UNITS = (1 << 6),
+ RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE = (1 << 7),
+} RRDR_GROUP_BY;
+
+#define SUPPORTED_GROUP_BY_METHODS (\
+ RRDR_GROUP_BY_SELECTED |\
+ RRDR_GROUP_BY_DIMENSION |\
+ RRDR_GROUP_BY_INSTANCE |\
+ RRDR_GROUP_BY_LABEL |\
+ RRDR_GROUP_BY_NODE |\
+ RRDR_GROUP_BY_CONTEXT |\
+ RRDR_GROUP_BY_UNITS |\
+ RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE \
+)
+
+struct web_buffer;
+
+RRDR_GROUP_BY group_by_parse(char *s);
+void buffer_json_group_by_to_array(struct web_buffer *wb, RRDR_GROUP_BY group_by);
+
+typedef enum rrdr_group_by_function {
+ RRDR_GROUP_BY_FUNCTION_AVERAGE = 0,
+ RRDR_GROUP_BY_FUNCTION_MIN,
+ RRDR_GROUP_BY_FUNCTION_MAX,
+ RRDR_GROUP_BY_FUNCTION_SUM,
+} RRDR_GROUP_BY_FUNCTION;
+
+RRDR_GROUP_BY_FUNCTION group_by_aggregate_function_parse(const char *s);
+const char *group_by_aggregate_function_to_string(RRDR_GROUP_BY_FUNCTION group_by_function);
#ifdef __cplusplus
}
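Because `RRDR_GROUP_BY` is a bitmask, several group-by keys can be combined in a single request. The sketch below shows one plausible way a string such as `node,instance` maps onto such flags; the real `group_by_parse()` is more involved, so treat this purely as an illustration of the flags-as-bitmask design:

```c
#include <stdio.h>
#include <string.h>

typedef enum {
    GB_NONE      = 0,
    GB_SELECTED  = (1 << 0),
    GB_DIMENSION = (1 << 1),
    GB_INSTANCE  = (1 << 2),
    GB_LABEL     = (1 << 3),
    GB_NODE      = (1 << 4),
} group_by_t;

static group_by_t parse_one(const char *tok) {
    if(!strcmp(tok, "selected"))  return GB_SELECTED;
    if(!strcmp(tok, "dimension")) return GB_DIMENSION;
    if(!strcmp(tok, "instance"))  return GB_INSTANCE;
    if(!strcmp(tok, "label"))     return GB_LABEL;
    if(!strcmp(tok, "node"))      return GB_NODE;
    return GB_NONE;
}

int main(void) {
    char s[] = "node,instance";           /* e.g. group_by=node,instance */
    group_by_t gb = GB_NONE;

    for(char *tok = strtok(s, ","); tok; tok = strtok(NULL, ","))
        gb |= parse_one(tok);

    printf("group_by bitmask: 0x%x\n", (unsigned)gb);   /* 0x14 */
    return 0;
}
```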
diff --git a/web/api/queries/rrdr.c b/web/api/queries/rrdr.c
index 676224c9d..2a0016891 100644
--- a/web/api/queries/rrdr.c
+++ b/web/api/queries/rrdr.c
@@ -61,41 +61,86 @@ static void rrdr_dump(RRDR *r)
inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r) {
if(unlikely(!r)) return;
- query_target_release(r->internal.qt);
+ for(size_t d = 0; d < r->d ;d++) {
+ string_freez(r->di[d]);
+ string_freez(r->dn[d]);
+ string_freez(r->du[d]);
+ }
+
+ query_target_release(r->internal.release_with_rrdr_qt);
+
onewayalloc_freez(owa, r->t);
onewayalloc_freez(owa, r->v);
+ onewayalloc_freez(owa, r->vh);
onewayalloc_freez(owa, r->o);
onewayalloc_freez(owa, r->od);
+ onewayalloc_freez(owa, r->di);
+ onewayalloc_freez(owa, r->dn);
+ onewayalloc_freez(owa, r->du);
+ onewayalloc_freez(owa, r->dp);
+ onewayalloc_freez(owa, r->dview);
+ onewayalloc_freez(owa, r->dqp);
onewayalloc_freez(owa, r->ar);
+ onewayalloc_freez(owa, r->gbc);
+ onewayalloc_freez(owa, r->dgbc);
+ onewayalloc_freez(owa, r->dgbs);
+
+ if(r->dl) {
+ for(size_t d = 0; d < r->d ;d++)
+ dictionary_destroy(r->dl[d]);
+
+ onewayalloc_freez(owa, r->dl);
+ }
+
+ dictionary_destroy(r->label_keys);
+
+ if(r->group_by.r) {
+ // prevent accidental infinite recursion
+ r->group_by.r->group_by.r = NULL;
+
+ // do not release qt twice
+ r->group_by.r->internal.qt = NULL;
+
+ rrdr_free(owa, r->group_by.r);
+ }
+
onewayalloc_freez(owa, r);
}
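`rrdr_free()` now walks the group-by chain as well, first breaking the child's forward link so a malformed cycle cannot recurse forever, and clearing the child's `internal.qt` so the query target is not released twice. A stand-in sketch of that guarded chained free (types are hypothetical):

```c
#include <stdlib.h>

typedef struct res {
    struct res *group_by_r;   /* next RRDR in the group-by chain */
    void *qt;                 /* owned query target (released once) */
} res_t;

static void res_free(res_t *r) {
    if(!r) return;

    if(r->group_by_r) {
        /* prevent accidental infinite recursion on a malformed cycle */
        r->group_by_r->group_by_r = NULL;

        /* do not release qt twice */
        r->group_by_r->qt = NULL;

        res_free(r->group_by_r);
    }

    free(r->qt);
    free(r);
}

int main(void) {
    res_t *child  = calloc(1, sizeof(res_t));
    res_t *parent = calloc(1, sizeof(res_t));
    parent->group_by_r = child;
    parent->qt = malloc(16);
    res_free(parent);         /* frees the child first, then parent and its qt */
    return 0;
}
```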
-RRDR *rrdr_create(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
- if(unlikely(!qt || !qt->query.used || !qt->window.points))
+RRDR *rrdr_create(ONEWAYALLOC *owa, QUERY_TARGET *qt, size_t dimensions, size_t points) {
+ if(unlikely(!qt))
return NULL;
- size_t dimensions = qt->query.used;
- size_t points = qt->window.points;
-
// create the rrdr
RRDR *r = onewayalloc_callocz(owa, 1, sizeof(RRDR));
r->internal.owa = owa;
r->internal.qt = qt;
- r->before = qt->window.before;
- r->after = qt->window.after;
- r->internal.points_wanted = qt->window.points;
+ r->view.before = qt->window.before;
+ r->view.after = qt->window.after;
+ r->time_grouping.points_wanted = points;
r->d = (int)dimensions;
r->n = (int)points;
- r->t = onewayalloc_callocz(owa, points, sizeof(time_t));
- r->v = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
- r->o = onewayalloc_mallocz(owa, points * dimensions * sizeof(RRDR_VALUE_FLAGS));
- r->ar = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
- r->od = onewayalloc_mallocz(owa, dimensions * sizeof(RRDR_DIMENSION_FLAGS));
+ if(points && dimensions) {
+ r->v = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ r->o = onewayalloc_mallocz(owa, points * dimensions * sizeof(RRDR_VALUE_FLAGS));
+ r->ar = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ }
+
+ if(points) {
+ r->t = onewayalloc_callocz(owa, points, sizeof(time_t));
+ }
+
+ if(dimensions) {
+ r->od = onewayalloc_mallocz(owa, dimensions * sizeof(RRDR_DIMENSION_FLAGS));
+ r->di = onewayalloc_callocz(owa, dimensions, sizeof(STRING *));
+ r->dn = onewayalloc_callocz(owa, dimensions, sizeof(STRING *));
+ r->du = onewayalloc_callocz(owa, dimensions, sizeof(STRING *));
+ }
- r->group = 1;
- r->update_every = 1;
+ r->view.group = 1;
+ r->view.update_every = 1;
return r;
}
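`rrdr_create()` no longer derives its geometry from the query target: callers pass `dimensions` and `points` explicitly, and each plane is allocated only when its geometry is non-zero, which lets the group-by code create empty shells and attach data later. A sketch of that conditional layout with stand-in types:

```c
#include <stdlib.h>
#include <stddef.h>
#include <time.h>

typedef struct result {
    size_t d, n;
    double        *v;   /* n x d values */
    unsigned char *o;   /* n x d per-value flags */
    time_t        *t;   /* n timestamps */
} result_t;

static result_t *result_create(size_t dimensions, size_t points) {
    result_t *r = calloc(1, sizeof(*r));
    if(!r) return NULL;

    r->d = dimensions;
    r->n = points;

    if(points && dimensions) {
        r->v = malloc(points * dimensions * sizeof(double));
        r->o = calloc(points * dimensions, sizeof(unsigned char));
    }

    if(points)
        r->t = calloc(points, sizeof(time_t));

    return r;
}

int main(void) {
    result_t *shell = result_create(0, 0);    /* empty shell, no planes */
    result_t *full  = result_create(4, 360);  /* fully allocated result */

    free(shell->t); free(shell->v); free(shell->o); free(shell);
    free(full->t);  free(full->v);  free(full->o);  free(full);
    return 0;
}
```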
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
index 2d982b136..c57be67f5 100644
--- a/web/api/queries/rrdr.h
+++ b/web/api/queries/rrdr.h
@@ -18,111 +18,152 @@ typedef enum tier_query_fetch {
} TIER_QUERY_FETCH;
typedef enum rrdr_options {
- RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
- RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
- RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
- RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
- RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
- RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
- RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
- RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
- RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
- RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
- RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
- RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
- RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
- RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
- RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
- RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
- RRDR_OPTION_NATURAL_POINTS = 0x00020000, // return the natural points of the database
- RRDR_OPTION_VIRTUAL_POINTS = 0x00040000, // return virtual points
- RRDR_OPTION_ANOMALY_BIT = 0x00080000, // Return the anomaly bit stored in each collected_number
- RRDR_OPTION_RETURN_RAW = 0x00100000, // Return raw data for aggregating across multiple nodes
- RRDR_OPTION_RETURN_JWAR = 0x00200000, // Return anomaly rates in jsonwrap
- RRDR_OPTION_SELECTED_TIER = 0x00400000, // Use the selected tier for the query
- RRDR_OPTION_ALL_DIMENSIONS = 0x00800000, // Return the full dimensions list
- RRDR_OPTION_SHOW_PLAN = 0x01000000, // Return the query plan in jsonwrap
+ RRDR_OPTION_NONZERO = (1 << 0), // don't output dimensions with just zero values
+ RRDR_OPTION_REVERSED = (1 << 1), // output the rows in reverse order (oldest to newest)
+ RRDR_OPTION_ABSOLUTE = (1 << 2), // values positive, for DATASOURCE_SSV before summing
+ RRDR_OPTION_MIN2MAX = (1 << 3), // when adding dimensions, use max - min, instead of sum
+ RRDR_OPTION_SECONDS = (1 << 4), // output seconds, instead of dates
+ RRDR_OPTION_MILLISECONDS = (1 << 5), // output milliseconds, instead of dates
+ RRDR_OPTION_NULL2ZERO = (1 << 6), // do not show nulls, convert them to zeros
+ RRDR_OPTION_OBJECTSROWS = (1 << 7), // each row of values should be an object, not an array
+ RRDR_OPTION_GOOGLE_JSON = (1 << 8), // comply with google JSON/JSONP specs
+ RRDR_OPTION_JSON_WRAP = (1 << 9), // wrap the response in a JSON header with info about the result
+ RRDR_OPTION_LABEL_QUOTES = (1 << 10), // in CSV output, wrap header labels in double quotes
+ RRDR_OPTION_PERCENTAGE = (1 << 11), // give values as percentage of total
+ RRDR_OPTION_NOT_ALIGNED = (1 << 12), // do not align charts for persistent timeframes
+ RRDR_OPTION_DISPLAY_ABS = (1 << 13), // for badges, display the absolute value, but calculate colors with sign
+ RRDR_OPTION_MATCH_IDS = (1 << 14), // when filtering dimensions, match only IDs
+ RRDR_OPTION_MATCH_NAMES = (1 << 15), // when filtering dimensions, match only names
+ RRDR_OPTION_NATURAL_POINTS = (1 << 16), // return the natural points of the database
+ RRDR_OPTION_VIRTUAL_POINTS = (1 << 17), // return virtual points
+ RRDR_OPTION_ANOMALY_BIT = (1 << 18), // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_RETURN_RAW = (1 << 19), // Return raw data for aggregating across multiple nodes
+ RRDR_OPTION_RETURN_JWAR = (1 << 20), // Return anomaly rates in jsonwrap
+ RRDR_OPTION_SELECTED_TIER = (1 << 21), // Use the selected tier for the query
+ RRDR_OPTION_ALL_DIMENSIONS = (1 << 22), // Return the full dimensions list
+ RRDR_OPTION_SHOW_DETAILS = (1 << 23), // v2 returns detailed object tree
+ RRDR_OPTION_DEBUG = (1 << 24), // v2 returns request description
+ RRDR_OPTION_MINIFY = (1 << 25), // remove JSON spaces and newlines from JSON output
+ RRDR_OPTION_GROUP_BY_LABELS = (1 << 26), // v2 returns flattened labels per dimension of the chart
// internal ones - not to be exposed to the API
- RRDR_OPTION_INTERNAL_AR = 0x10000000, // internal use only, to let the formatters we want to render the anomaly rate
- RRDR_OPTION_HEALTH_RSRVD1 = 0x80000000, // reserved for RRDCALC_OPTION_NO_CLEAR_NOTIFICATION
+ RRDR_OPTION_HEALTH_RSRVD1 = (1 << 30), // reserved for RRDCALC_OPTION_NO_CLEAR_NOTIFICATION
+ RRDR_OPTION_INTERNAL_AR = (1 << 31), // internal use only, to let the formatters know we want to render the anomaly rate
} RRDR_OPTIONS;
-typedef enum rrdr_value_flag {
- RRDR_VALUE_NOTHING = 0x00, // no flag set (a good default)
- RRDR_VALUE_EMPTY = 0x01, // the database value is empty
- RRDR_VALUE_RESET = 0x02, // the database value is marked as reset (overflown)
+typedef enum __attribute__ ((__packed__)) rrdr_value_flag {
+
+ // IMPORTANT:
+ // THIS IS AN AGREED BIT MAP BETWEEN AGENT, CLOUD FRONT-END AND CLOUD BACK-END
+ // DO NOT CHANGE THE MAPPINGS !
+
+ RRDR_VALUE_NOTHING = 0, // no flag set (a good default)
+ RRDR_VALUE_EMPTY = (1 << 0), // the database value is empty
+ RRDR_VALUE_RESET = (1 << 1), // the database value is marked as reset (overflown)
+ RRDR_VALUE_PARTIAL = (1 << 2), // the database provides partial data about this point (used in group-by)
} RRDR_VALUE_FLAGS;
-typedef enum rrdr_dimension_flag {
- RRDR_DIMENSION_DEFAULT = 0x00,
- RRDR_DIMENSION_HIDDEN = 0x04, // the dimension is hidden (not to be presented to callers)
- RRDR_DIMENSION_NONZERO = 0x08, // the dimension is non zero (contains non-zero values)
- RRDR_DIMENSION_QUERIED = 0x10, // the dimension is selected for evaluation in this RRDR
+typedef enum __attribute__ ((__packed__)) rrdr_dimension_flag {
+ RRDR_DIMENSION_DEFAULT = 0,
+ RRDR_DIMENSION_HIDDEN = (1 << 0), // the dimension is hidden (not to be presented to callers)
+ RRDR_DIMENSION_NONZERO = (1 << 1), // the dimension is non zero (contains non-zero values)
+ RRDR_DIMENSION_SELECTED = (1 << 2), // the dimension has been selected for query
+ RRDR_DIMENSION_QUERIED = (1 << 3), // the dimension has been queried
+ RRDR_DIMENSION_FAILED = (1 << 4), // the dimension failed to be queried
+ RRDR_DIMENSION_GROUPED = (1 << 5), // the dimension has been grouped in this RRDR
} RRDR_DIMENSION_FLAGS;
// RRDR result options
-typedef enum rrdr_result_flags {
- RRDR_RESULT_OPTION_ABSOLUTE = 0x00000001, // the query uses absolute time-frames
- // (can be cached by browsers and proxies)
- RRDR_RESULT_OPTION_RELATIVE = 0x00000002, // the query uses relative time-frames
- // (should not to be cached by browsers and proxies)
- RRDR_RESULT_OPTION_VARIABLE_STEP = 0x00000004, // the query uses variable-step time-frames
- RRDR_RESULT_OPTION_CANCEL = 0x00000008, // the query needs to be cancelled
-} RRDR_RESULT_OPTIONS;
+typedef enum __attribute__ ((__packed__)) rrdr_result_flags {
+ RRDR_RESULT_FLAG_ABSOLUTE = (1 << 0), // the query uses absolute time-frames
+ // (can be cached by browsers and proxies)
+ RRDR_RESULT_FLAG_RELATIVE = (1 << 1), // the query uses relative time-frames
+ // (should not to be cached by browsers and proxies)
+ RRDR_RESULT_FLAG_CANCEL = (1 << 2), // the query needs to be cancelled
+} RRDR_RESULT_FLAGS;
-typedef struct rrdresult {
- RRDR_RESULT_OPTIONS result_options; // RRDR_RESULT_OPTION_*
+#define RRDR_DVIEW_ANOMALY_COUNT_MULTIPLIER 1000.0
+typedef struct rrdresult {
size_t d; // the number of dimensions
- size_t n; // the number of values in the arrays
- size_t rows; // the number of rows used
+ size_t n; // the number of values in the arrays (number of points per dimension)
+ size_t rows; // the number of actual rows used
RRDR_DIMENSION_FLAGS *od; // the options for the dimensions
+ STRING **di; // array of d dimension ids
+ STRING **dn; // array of d dimension names
+ STRING **du; // array of d dimension units
+ uint32_t *dgbs; // array of d dimension group by slots - NOT ALLOCATED when RRDR is created
+ uint32_t *dgbc; // array of d dimension group by counts - NOT ALLOCATED when RRDR is created
+ uint32_t *dp; // array of d dimension priority - NOT ALLOCATED when RRDR is created
+ DICTIONARY **dl; // array of d dimension labels - NOT ALLOCATED when RRDR is created
+ STORAGE_POINT *dqp; // array of d dimensions query points - NOT ALLOCATED when RRDR is created
+ STORAGE_POINT *dview; // array of d dimensions group by view - NOT ALLOCATED when RRDR is created
+ NETDATA_DOUBLE *vh; // array of n x d hidden values, while grouping - NOT ALLOCATED when RRDR is created
+
+ DICTIONARY *label_keys;
+
time_t *t; // array of n timestamps
NETDATA_DOUBLE *v; // array n x d values
RRDR_VALUE_FLAGS *o; // array n x d options for each value returned
NETDATA_DOUBLE *ar; // array n x d of anomaly rates (0 - 100)
+ uint32_t *gbc; // array n x d of group by count - NOT ALLOCATED when RRDR is created
- size_t group; // how many collected values were grouped for each row
- time_t update_every; // what is the suggested update frequency in seconds
-
- NETDATA_DOUBLE min;
- NETDATA_DOUBLE max;
+ struct {
+ size_t group; // how many collected values were grouped for each row - NEEDED BY GROUPING FUNCTIONS
+ time_t after;
+ time_t before;
+ time_t update_every; // what is the suggested update frequency in seconds
+ NETDATA_DOUBLE min;
+ NETDATA_DOUBLE max;
+ RRDR_RESULT_FLAGS flags; // RRDR_RESULT_FLAG_*
+ } view;
- time_t before;
- time_t after;
+ struct {
+ size_t db_points_read;
+ size_t result_points_generated;
+ } stats;
- // internal rrd2rrdr() members below this point
struct {
- ONEWAYALLOC *owa; // the allocator used
- struct query_target *qt; // the QUERY_TARGET
+ void *data; // the internal data of the grouping function
- RRDR_OPTIONS query_options; // RRDR_OPTION_* (as run by the query)
+ // grouping function pointers
+ RRDR_TIME_GROUPING add_flush;
+ void (*create)(struct rrdresult *r, const char *options);
+ void (*reset)(struct rrdresult *r);
+ void (*free)(struct rrdresult *r);
+ void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);
+ NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+ TIER_QUERY_FETCH tier_query_fetch; // which value to use from STORAGE_POINT
size_t points_wanted; // used by SES and DES
size_t resampling_group; // used by AVERAGE
NETDATA_DOUBLE resampling_divisor; // used by AVERAGE
+ } time_grouping;
- // grouping function pointers
- void (*grouping_create)(struct rrdresult *r, const char *options);
- void (*grouping_reset)(struct rrdresult *r);
- void (*grouping_free)(struct rrdresult *r);
- void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
- NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ struct {
+ struct rrdresult *r;
+ } group_by;
- TIER_QUERY_FETCH tier_query_fetch; // which value to use from STORAGE_POINT
- void *grouping_data; // the internal data of the grouping function
+ struct {
+ time_t max_update_every;
+ time_t expected_after;
+ time_t trimmed_after;
+ } partial_data_trimming;
+
+ struct {
+ ONEWAYALLOC *owa; // the allocator used
+ struct query_target *qt; // the QUERY_TARGET
+ size_t contexts; // temp needed between json_wrapper_begin2() and json_wrapper_end2()
+ size_t queries_count; // temp needed to know if a query is the first executed
#ifdef NETDATA_INTERNAL_CHECKS
const char *log;
#endif
- // statistics
- size_t db_points_read;
- size_t result_points_generated;
- size_t tier_points_read[RRD_STORAGE_TIERS];
+ struct query_target *release_with_rrdr_qt;
} internal;
} RRDR;
@@ -130,7 +171,7 @@ typedef struct rrdresult {
#include "database/rrd.h"
void rrdr_free(ONEWAYALLOC *owa, RRDR *r);
-RRDR *rrdr_create(ONEWAYALLOC *owa, struct query_target *qt);
+RRDR *rrdr_create(ONEWAYALLOC *owa, struct query_target *qt, size_t dimensions, size_t points);
#include "../web_api_v1.h"
#include "web/api/queries/query.h"
@@ -138,14 +179,14 @@ RRDR *rrdr_create(ONEWAYALLOC *owa, struct query_target *qt);
RRDR *rrd2rrdr_legacy(
ONEWAYALLOC *owa,
RRDSET *st, size_t points, time_t after, time_t before,
- RRDR_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
- const char *group_options, time_t timeout, size_t tier, QUERY_SOURCE query_source,
+ RRDR_TIME_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
+ const char *group_options, time_t timeout_ms, size_t tier, QUERY_SOURCE query_source,
STORAGE_PRIORITY priority);
RRDR *rrd2rrdr(ONEWAYALLOC *owa, struct query_target *qt);
bool query_target_calculate_window(struct query_target *qt);
-bool rrdr_relative_window_to_absolute(time_t *after, time_t *before);
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t *now_ptr);
#ifdef __cplusplus
}
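The `__attribute__((__packed__))` added to these enums shrinks each stored flag to a single byte, which matters because `RRDR_VALUE_FLAGS` occupies an `n x d` array per result. A small demonstration of the effect (GCC/Clang extension; exact sizes are implementation-defined, so take the comments as typical values):

```c
#include <stdio.h>

typedef enum value_flags_plain {
    VF_NOTHING = 0,
    VF_EMPTY   = (1 << 0),
    VF_RESET   = (1 << 1),
    VF_PARTIAL = (1 << 2),
} value_flags_plain;

typedef enum __attribute__((__packed__)) value_flags_packed {
    VFP_NOTHING = 0,
    VFP_EMPTY   = (1 << 0),
    VFP_RESET   = (1 << 1),
    VFP_PARTIAL = (1 << 2),
} value_flags_packed;

int main(void) {
    printf("plain enum:  %zu bytes\n", sizeof(value_flags_plain));   /* typically 4 */
    printf("packed enum: %zu bytes\n", sizeof(value_flags_packed));  /* 1 on GCC/Clang */
    return 0;
}
```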
diff --git a/web/api/queries/ses/README.md b/web/api/queries/ses/README.md
index b835b8120..56634d36e 100644
--- a/web/api/queries/ses/README.md
+++ b/web/api/queries/ses/README.md
@@ -1,6 +1,10 @@
<!--
title: "Single (or Simple) Exponential Smoothing (`ses`)"
+sidebar_label: "Single (or Simple) Exponential Smoothing (`ses`)"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/ses/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Single (or Simple) Exponential Smoothing (`ses`)
diff --git a/web/api/queries/ses/ses.c b/web/api/queries/ses/ses.c
index 5e94002c3..39eb445a0 100644
--- a/web/api/queries/ses/ses.c
+++ b/web/api/queries/ses/ses.c
@@ -6,85 +6,3 @@
// ----------------------------------------------------------------------------
// single exponential smoothing
-struct grouping_ses {
- NETDATA_DOUBLE alpha;
- NETDATA_DOUBLE alpha_other;
- NETDATA_DOUBLE level;
- size_t count;
-};
-
-static size_t max_window_size = 15;
-
-void grouping_init_ses(void) {
- long long ret = config_get_number(CONFIG_SECTION_WEB, "ses max window", (long long)max_window_size);
- if(ret <= 1) {
- config_set_number(CONFIG_SECTION_WEB, "ses max window", (long long)max_window_size);
- }
- else {
- max_window_size = (size_t) ret;
- }
-}
-
-static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_ses *g) {
- (void)g;
-
- NETDATA_DOUBLE points;
- if(r->group == 1) {
- // provide a running DES
- points = (NETDATA_DOUBLE)r->internal.points_wanted;
- }
- else {
- // provide a SES with flush points
- points = (NETDATA_DOUBLE)r->group;
- }
-
- return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
-}
-
-static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
- // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
- // A commonly used value for alpha is 2 / (N + 1)
- g->alpha = 2.0 / (window(r, g) + 1.0);
- g->alpha_other = 1.0 - g->alpha;
-}
-
-void grouping_create_ses(RRDR *r, const char *options __maybe_unused) {
- struct grouping_ses *g = (struct grouping_ses *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_ses));
- set_alpha(r, g);
- g->level = 0.0;
- r->internal.grouping_data = g;
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_ses(RRDR *r) {
- struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
- g->level = 0.0;
- g->count = 0;
-}
-
-void grouping_free_ses(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
-
- if(unlikely(!g->count))
- g->level = value;
-
- g->level = g->alpha * value + g->alpha_other * g->level;
- g->count++;
-}
-
-NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
-
- if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- return 0.0;
- }
-
- return g->level;
-}
diff --git a/web/api/queries/ses/ses.h b/web/api/queries/ses/ses.h
index 79b09fbdf..de8645ff0 100644
--- a/web/api/queries/ses/ses.h
+++ b/web/api/queries/ses/ses.h
@@ -6,12 +6,87 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_init_ses(void);
+struct tg_ses {
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE level;
+ size_t count;
+};
-void grouping_create_ses(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_ses(RRDR *r);
-void grouping_free_ses(RRDR *r);
-void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+static size_t tg_ses_max_window_size = 15;
+
+static inline void tg_ses_init(void) {
+ long long ret = config_get_number(CONFIG_SECTION_WEB, "ses max window", (long long)tg_ses_max_window_size);
+ if(ret <= 1) {
+ config_set_number(CONFIG_SECTION_WEB, "ses max window", (long long)tg_ses_max_window_size);
+ }
+ else {
+ tg_ses_max_window_size = (size_t) ret;
+ }
+}
+
+static inline NETDATA_DOUBLE tg_ses_window(RRDR *r, struct tg_ses *g) {
+ (void)g;
+
+ NETDATA_DOUBLE points;
+ if(r->view.group == 1) {
+ // provide a running DES
+ points = (NETDATA_DOUBLE)r->time_grouping.points_wanted;
+ }
+ else {
+ // provide a SES with flush points
+ points = (NETDATA_DOUBLE)r->view.group;
+ }
+
+ return (points > (NETDATA_DOUBLE)tg_ses_max_window_size) ? (NETDATA_DOUBLE)tg_ses_max_window_size : points;
+}
+
+static inline void tg_ses_set_alpha(RRDR *r, struct tg_ses *g) {
+ // https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+ // A commonly used value for alpha is 2 / (N + 1)
+ g->alpha = 2.0 / (tg_ses_window(r, g) + 1.0);
+ g->alpha_other = 1.0 - g->alpha;
+}
+
+static inline void tg_ses_create(RRDR *r, const char *options __maybe_unused) {
+ struct tg_ses *g = (struct tg_ses *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_ses));
+ tg_ses_set_alpha(r, g);
+ g->level = 0.0;
+ r->time_grouping.data = g;
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+static inline void tg_ses_reset(RRDR *r) {
+ struct tg_ses *g = (struct tg_ses *)r->time_grouping.data;
+ g->level = 0.0;
+ g->count = 0;
+}
+
+static inline void tg_ses_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_ses_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_ses *g = (struct tg_ses *)r->time_grouping.data;
+
+ if(unlikely(!g->count))
+ g->level = value;
+
+ g->level = g->alpha * value + g->alpha_other * g->level;
+ g->count++;
+}
+
+static inline NETDATA_DOUBLE tg_ses_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_ses *g = (struct tg_ses *)r->time_grouping.data;
+
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ return 0.0;
+ }
+
+ return g->level;
+}
#endif //NETDATA_API_QUERIES_SES_H
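For reference, the recurrence these inlined `tg_ses_*` helpers implement is plain exponential smoothing: `level = alpha * value + (1 - alpha) * level`, with `alpha = 2 / (N + 1)` as computed by `tg_ses_set_alpha()`. A standalone sketch over made-up sample data:

```c
#include <stdio.h>
#include <stddef.h>

int main(void) {
    double series[] = {10.0, 12.0, 11.0, 13.0, 12.0};
    size_t n = sizeof(series) / sizeof(series[0]);

    double N = 5.0;                      /* effective window size */
    double alpha = 2.0 / (N + 1.0);      /* 2 / (N + 1) */
    double level = series[0];            /* seed with the first value */

    for(size_t i = 1; i < n ; i++)
        level = alpha * series[i] + (1.0 - alpha) * level;

    printf("smoothed level: %f\n", level);
    return 0;
}
```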
diff --git a/web/api/queries/stddev/README.md b/web/api/queries/stddev/README.md
index 2fca47d5e..f0586a062 100644
--- a/web/api/queries/stddev/README.md
+++ b/web/api/queries/stddev/README.md
@@ -1,6 +1,10 @@
<!--
title: "standard deviation (`stddev`)"
+sidebar_label: "standard deviation (`stddev`)"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/stddev/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# standard deviation (`stddev`)
diff --git a/web/api/queries/stddev/stddev.c b/web/api/queries/stddev/stddev.c
index 92a67b42d..8f5431194 100644
--- a/web/api/queries/stddev/stddev.c
+++ b/web/api/queries/stddev/stddev.c
@@ -6,123 +6,11 @@
// ----------------------------------------------------------------------------
// stddev
-// this implementation comes from:
-// https://www.johndcook.com/blog/standard_deviation/
-
-struct grouping_stddev {
- long count;
- NETDATA_DOUBLE m_oldM, m_newM, m_oldS, m_newS;
-};
-
-void grouping_create_stddev(RRDR *r, const char *options __maybe_unused) {
- r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_stddev));
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_stddev(RRDR *r) {
- struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- g->count = 0;
-}
-
-void grouping_free_stddev(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
-
- g->count++;
-
- // See Knuth TAOCP vol 2, 3rd edition, page 232
- if (g->count == 1) {
- g->m_oldM = g->m_newM = value;
- g->m_oldS = 0.0;
- }
- else {
- g->m_newM = g->m_oldM + (value - g->m_oldM) / g->count;
- g->m_newS = g->m_oldS + (value - g->m_oldM) * (value - g->m_newM);
-
- // set up for next iteration
- g->m_oldM = g->m_newM;
- g->m_oldS = g->m_newS;
- }
-}
-
-static inline NETDATA_DOUBLE mean(struct grouping_stddev *g) {
- return (g->count > 0) ? g->m_newM : 0.0;
-}
-
-static inline NETDATA_DOUBLE variance(struct grouping_stddev *g) {
- return ( (g->count > 1) ? g->m_newS/(NETDATA_DOUBLE)(g->count - 1) : 0.0 );
-}
-static inline NETDATA_DOUBLE stddev(struct grouping_stddev *g) {
- return sqrtndd(variance(g));
-}
-
-NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(likely(g->count > 1)) {
- value = stddev(g);
-
- if(!netdata_double_isnumber(value)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- }
- else if(g->count == 1) {
- value = 0.0;
- }
- else {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
-
- grouping_reset_stddev(r);
-
- return value;
-}
-
-// https://en.wikipedia.org/wiki/Coefficient_of_variation
-NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(likely(g->count > 1)) {
- NETDATA_DOUBLE m = mean(g);
- value = 100.0 * stddev(g) / ((m < 0)? -m : m);
-
- if(unlikely(!netdata_double_isnumber(value))) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- }
- else if(g->count == 1) {
- // one value collected
- value = 0.0;
- }
- else {
- // no values collected
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
-
- grouping_reset_stddev(r);
-
- return value;
-}
-
-
/*
* Mean = average
*
NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+ struct grouping_stddev *g = (struct grouping_stddev *)r->grouping.grouping_data;
NETDATA_DOUBLE value;
@@ -149,7 +37,7 @@ NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options
* It is not advised to use this version of variance directly
*
NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
+ struct grouping_stddev *g = (struct grouping_stddev *)r->grouping.grouping_data;
NETDATA_DOUBLE value;
diff --git a/web/api/queries/stddev/stddev.h b/web/api/queries/stddev/stddev.h
index 4b8ffcd53..f7a1a06c3 100644
--- a/web/api/queries/stddev/stddev.h
+++ b/web/api/queries/stddev/stddev.h
@@ -6,13 +6,115 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_stddev(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_stddev(RRDR *r);
-void grouping_free_stddev(RRDR *r);
-void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// this implementation comes from:
+// https://www.johndcook.com/blog/standard_deviation/
+
+struct tg_stddev {
+ long count;
+ NETDATA_DOUBLE m_oldM, m_newM, m_oldS, m_newS;
+};
+
+static inline void tg_stddev_create(RRDR *r, const char *options __maybe_unused) {
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_stddev));
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+static inline void tg_stddev_reset(RRDR *r) {
+ struct tg_stddev *g = (struct tg_stddev *)r->time_grouping.data;
+ g->count = 0;
+}
+
+static inline void tg_stddev_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_stddev_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_stddev *g = (struct tg_stddev *)r->time_grouping.data;
+
+ g->count++;
+
+ // See Knuth TAOCP vol 2, 3rd edition, page 232
+ if (g->count == 1) {
+ g->m_oldM = g->m_newM = value;
+ g->m_oldS = 0.0;
+ }
+ else {
+ g->m_newM = g->m_oldM + (value - g->m_oldM) / g->count;
+ g->m_newS = g->m_oldS + (value - g->m_oldM) * (value - g->m_newM);
+
+ // set up for next iteration
+ g->m_oldM = g->m_newM;
+ g->m_oldS = g->m_newS;
+ }
+}
+
+static inline NETDATA_DOUBLE tg_stddev_mean(struct tg_stddev *g) {
+ return (g->count > 0) ? g->m_newM : 0.0;
+}
+
+static inline NETDATA_DOUBLE tg_stddev_variance(struct tg_stddev *g) {
+ return ( (g->count > 1) ? g->m_newS/(NETDATA_DOUBLE)(g->count - 1) : 0.0 );
+}
+static inline NETDATA_DOUBLE tg_stddev_stddev(struct tg_stddev *g) {
+ return sqrtndd(tg_stddev_variance(g));
+}
+
+static inline NETDATA_DOUBLE tg_stddev_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_stddev *g = (struct tg_stddev *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(likely(g->count > 1)) {
+ value = tg_stddev_stddev(g);
+
+ if(!netdata_double_isnumber(value)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+ else if(g->count == 1) {
+ value = 0.0;
+ }
+ else {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ tg_stddev_reset(r);
+
+ return value;
+}
+
+// https://en.wikipedia.org/wiki/Coefficient_of_variation
+static inline NETDATA_DOUBLE tg_stddev_coefficient_of_variation_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_stddev *g = (struct tg_stddev *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(likely(g->count > 1)) {
+ NETDATA_DOUBLE m = tg_stddev_mean(g);
+ value = 100.0 * tg_stddev_stddev(g) / ((m < 0)? -m : m);
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ }
+ else if(g->count == 1) {
+ // one value collected
+ value = 0.0;
+ }
+ else {
+ // no values collected
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ tg_stddev_reset(r);
+
+ return value;
+}
#endif //NETDATA_API_QUERIES_STDDEV_H
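The running statistics kept by `tg_stddev_add()` are the Welford/Knuth recurrence (TAOCP vol 2), which yields mean and variance in one pass without catastrophic cancellation. Extracted into a standalone sketch over made-up data:

```c
#include <math.h>
#include <stdio.h>
#include <stddef.h>

int main(void) {
    double xs[] = {2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0};
    long count = 0;
    double oldM = 0, newM = 0, oldS = 0, newS = 0;

    for(size_t i = 0; i < sizeof(xs) / sizeof(xs[0]) ; i++) {
        count++;
        if(count == 1) {
            oldM = newM = xs[i];
            oldS = 0.0;
        }
        else {
            newM = oldM + (xs[i] - oldM) / count;
            newS = oldS + (xs[i] - oldM) * (xs[i] - newM);
            oldM = newM;                 /* set up for the next iteration */
            oldS = newS;
        }
    }

    double variance = (count > 1) ? newS / (count - 1) : 0.0;
    printf("mean=%g stddev=%g\n", newM, sqrt(variance));   /* mean=5, stddev~2.14 */
    return 0;
}
```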
diff --git a/web/api/queries/sum/README.md b/web/api/queries/sum/README.md
index d4465bd82..62e18acab 100644
--- a/web/api/queries/sum/README.md
+++ b/web/api/queries/sum/README.md
@@ -1,6 +1,10 @@
<!--
title: "Sum"
+sidebar_label: "Sum"
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/sum/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Sum
diff --git a/web/api/queries/sum/sum.c b/web/api/queries/sum/sum.c
index eec6e2ad0..cf4484217 100644
--- a/web/api/queries/sum/sum.c
+++ b/web/api/queries/sum/sum.c
@@ -5,51 +5,5 @@
// ----------------------------------------------------------------------------
// sum
-struct grouping_sum {
- NETDATA_DOUBLE sum;
- size_t count;
-};
-
-void grouping_create_sum(RRDR *r, const char *options __maybe_unused) {
- r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_sum));
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_sum(RRDR *r) {
- struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
- g->sum = 0;
- g->count = 0;
-}
-
-void grouping_free_sum(RRDR *r) {
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
- g->sum += value;
- g->count++;
-}
-
-NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
-
- if(unlikely(!g->count)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else {
- value = g->sum;
- }
-
- g->sum = 0.0;
- g->count = 0;
-
- return value;
-}
diff --git a/web/api/queries/sum/sum.h b/web/api/queries/sum/sum.h
index 898782775..5e07f45d6 100644
--- a/web/api/queries/sum/sum.h
+++ b/web/api/queries/sum/sum.h
@@ -6,10 +6,51 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_sum(RRDR *r, const char *options __maybe_unused);
-void grouping_reset_sum(RRDR *r);
-void grouping_free_sum(RRDR *r);
-void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+struct tg_sum {
+ NETDATA_DOUBLE sum;
+ size_t count;
+};
+
+static inline void tg_sum_create(RRDR *r, const char *options __maybe_unused) {
+ r->time_grouping.data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_sum));
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+static inline void tg_sum_reset(RRDR *r) {
+ struct tg_sum *g = (struct tg_sum *)r->time_grouping.data;
+ g->sum = 0;
+ g->count = 0;
+}
+
+static inline void tg_sum_free(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_sum_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_sum *g = (struct tg_sum *)r->time_grouping.data;
+ g->sum += value;
+ g->count++;
+}
+
+static inline NETDATA_DOUBLE tg_sum_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_sum *g = (struct tg_sum *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = g->sum;
+ }
+
+ g->sum = 0.0;
+ g->count = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERY_SUM_H
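All the `tg_*` headers follow the same add/flush contract: `add()` accumulates samples for the current output point, and `flush()` emits the point, flags it `RRDR_VALUE_EMPTY` when nothing was accumulated, and resets the state for the next window. A sketch of that lifecycle using the sum method's state (stand-in types, not netdata APIs):

```c
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct sum_state { double sum; size_t count; };

static void sum_add(struct sum_state *g, double value) {
    g->sum += value;
    g->count++;
}

static double sum_flush(struct sum_state *g, bool *empty) {
    *empty = (g->count == 0);            /* nothing accumulated -> EMPTY */
    double v = *empty ? 0.0 : g->sum;

    g->sum = 0.0;                        /* reset for the next window */
    g->count = 0;
    return v;
}

int main(void) {
    struct sum_state g = {0};
    bool empty;

    sum_add(&g, 1.0);                    /* window 1: two samples */
    sum_add(&g, 2.0);
    printf("window 1: %g\n", sum_flush(&g, &empty));               /* 3 */

    printf("window 2: %g empty=%d\n", sum_flush(&g, &empty), empty); /* 0, 1 */
    return 0;
}
```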
diff --git a/web/api/queries/trimmed_mean/README.md b/web/api/queries/trimmed_mean/README.md
index 71cdb85db..08a32b83b 100644
--- a/web/api/queries/trimmed_mean/README.md
+++ b/web/api/queries/trimmed_mean/README.md
@@ -1,7 +1,11 @@
<!--
title: "Trimmed Mean"
+sidebar_label: "Trimmed Mean"
description: "Use trimmed-mean in API queries and health entities to find the average value from a sample, eliminating any unwanted spikes in the returned metrics."
custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/trimmed_mean/README.md
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Developers/Web/Api/Queries"
-->
# Trimmed Mean
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.c b/web/api/queries/trimmed_mean/trimmed_mean.c
index 2277208a7..c50db7ed6 100644
--- a/web/api/queries/trimmed_mean/trimmed_mean.c
+++ b/web/api/queries/trimmed_mean/trimmed_mean.c
@@ -5,162 +5,3 @@
// ----------------------------------------------------------------------------
// median
-struct grouping_trimmed_mean {
- size_t series_size;
- size_t next_pos;
- NETDATA_DOUBLE percent;
-
- NETDATA_DOUBLE *series;
-};
-
-static void grouping_create_trimmed_mean_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
- long entries = r->group;
- if(entries < 10) entries = 10;
-
- struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_trimmed_mean));
- g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
- g->series_size = (size_t)entries;
-
- g->percent = def;
- if(options && *options) {
- g->percent = str2ndd(options, NULL);
- if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
- if(g->percent < 0.0) g->percent = 0.0;
- if(g->percent > 50.0) g->percent = 50.0;
- }
-
- g->percent = 1.0 - ((g->percent / 100.0) * 2.0);
- r->internal.grouping_data = g;
-}
-
-void grouping_create_trimmed_mean1(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 1.0);
-}
-void grouping_create_trimmed_mean2(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 2.0);
-}
-void grouping_create_trimmed_mean3(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 3.0);
-}
-void grouping_create_trimmed_mean5(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 5.0);
-}
-void grouping_create_trimmed_mean10(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 10.0);
-}
-void grouping_create_trimmed_mean15(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 15.0);
-}
-void grouping_create_trimmed_mean20(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 20.0);
-}
-void grouping_create_trimmed_mean25(RRDR *r, const char *options) {
- grouping_create_trimmed_mean_internal(r, options, 25.0);
-}
-
-// resets when switches dimensions
-// so, clear everything to restart
-void grouping_reset_trimmed_mean(RRDR *r) {
- struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
- g->next_pos = 0;
-}
-
-void grouping_free_trimmed_mean(RRDR *r) {
- struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
- if(g) onewayalloc_freez(r->internal.owa, g->series);
-
- onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
- r->internal.grouping_data = NULL;
-}
-
-void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value) {
- struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
-
- if(unlikely(g->next_pos >= g->series_size)) {
- g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
- g->series_size *= 2;
- }
-
- g->series[g->next_pos++] = value;
-}
-
-NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
- struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
-
- NETDATA_DOUBLE value;
- size_t available_slots = g->next_pos;
-
- if(unlikely(!available_slots)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
- else if(available_slots == 1) {
- value = g->series[0];
- }
- else {
- sort_series(g->series, available_slots);
-
- NETDATA_DOUBLE min = g->series[0];
- NETDATA_DOUBLE max = g->series[available_slots - 1];
-
- if (min != max) {
- size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
- if(!slots_to_use) slots_to_use = 1;
-
- NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
- NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
-
- NETDATA_DOUBLE percent_interpolation_slot = 0.0;
- NETDATA_DOUBLE percent_last_slot = 0.0;
- if(percent_delta > 0.0) {
- NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
- NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
-
- percent_interpolation_slot = percent_delta / percent_1slot;
- percent_last_slot = 1 - percent_interpolation_slot;
- }
-
- int start_slot, stop_slot, step, last_slot, interpolation_slot;
- if(min >= 0.0 && max >= 0.0) {
- start_slot = (int)((available_slots - slots_to_use) / 2);
- stop_slot = start_slot + (int)slots_to_use;
- last_slot = stop_slot - 1;
- interpolation_slot = stop_slot;
- step = 1;
- }
- else {
- start_slot = (int)available_slots - 1 - (int)((available_slots - slots_to_use) / 2);
- stop_slot = start_slot - (int)slots_to_use;
- last_slot = stop_slot + 1;
- interpolation_slot = stop_slot;
- step = -1;
- }
-
- value = 0.0;
- for(int slot = start_slot; slot != stop_slot ; slot += step)
- value += g->series[slot];
-
- size_t counted = slots_to_use;
- if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
- value += g->series[interpolation_slot] * percent_interpolation_slot;
- value += g->series[last_slot] * percent_last_slot;
- counted++;
- }
-
- value = value / (NETDATA_DOUBLE)counted;
- }
- else
- value = min;
- }
-
- if(unlikely(!netdata_double_isnumber(value))) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
- }
-
- //log_series_to_stderr(g->series, g->next_pos, value, "trimmed_mean");
-
- g->next_pos = 0;
-
- return value;
-}
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.h b/web/api/queries/trimmed_mean/trimmed_mean.h
index e66d92541..3c09015bf 100644
--- a/web/api/queries/trimmed_mean/trimmed_mean.h
+++ b/web/api/queries/trimmed_mean/trimmed_mean.h
@@ -6,17 +6,164 @@
#include "../query.h"
#include "../rrdr.h"
-void grouping_create_trimmed_mean1(RRDR *r, const char *options);
-void grouping_create_trimmed_mean2(RRDR *r, const char *options);
-void grouping_create_trimmed_mean3(RRDR *r, const char *options);
-void grouping_create_trimmed_mean5(RRDR *r, const char *options);
-void grouping_create_trimmed_mean10(RRDR *r, const char *options);
-void grouping_create_trimmed_mean15(RRDR *r, const char *options);
-void grouping_create_trimmed_mean20(RRDR *r, const char *options);
-void grouping_create_trimmed_mean25(RRDR *r, const char *options);
-void grouping_reset_trimmed_mean(RRDR *r);
-void grouping_free_trimmed_mean(RRDR *r);
-void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value);
-NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+struct tg_trimmed_mean {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static inline void tg_trimmed_mean_create_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->view.group;
+ if(entries < 10) entries = 10;
+
+ struct tg_trimmed_mean *g = (struct tg_trimmed_mean *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct tg_trimmed_mean));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
+ g->percent = 1.0 - ((g->percent / 100.0) * 2.0);
+ r->time_grouping.data = g;
+}
+
+static inline void tg_trimmed_mean_create_1(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 1.0);
+}
+static inline void tg_trimmed_mean_create_2(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 2.0);
+}
+static inline void tg_trimmed_mean_create_3(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 3.0);
+}
+static inline void tg_trimmed_mean_create_5(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 5.0);
+}
+static inline void tg_trimmed_mean_create_10(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 10.0);
+}
+static inline void tg_trimmed_mean_create_15(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 15.0);
+}
+static inline void tg_trimmed_mean_create_20(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 20.0);
+}
+static inline void tg_trimmed_mean_create_25(RRDR *r, const char *options) {
+ tg_trimmed_mean_create_internal(r, options, 25.0);
+}
+
+// resets when the query switches dimensions,
+// so clear everything to restart
+static inline void tg_trimmed_mean_reset(RRDR *r) {
+ struct tg_trimmed_mean *g = (struct tg_trimmed_mean *)r->time_grouping.data;
+ g->next_pos = 0;
+}
+
+static inline void tg_trimmed_mean_free(RRDR *r) {
+ struct tg_trimmed_mean *g = (struct tg_trimmed_mean *)r->time_grouping.data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->time_grouping.data);
+ r->time_grouping.data = NULL;
+}
+
+static inline void tg_trimmed_mean_add(RRDR *r, NETDATA_DOUBLE value) {
+ struct tg_trimmed_mean *g = (struct tg_trimmed_mean *)r->time_grouping.data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize(r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+static inline NETDATA_DOUBLE tg_trimmed_mean_flush(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct tg_trimmed_mean *g = (struct tg_trimmed_mean *)r->time_grouping.data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1 - (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "trimmed_mean");
+
+ g->next_pos = 0;
+
+ return value;
+}
#endif //NETDATA_API_QUERIES_TRIMMED_MEAN_H
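/*
 * Illustration only: the simple form of the computation tg_trimmed_mean_flush()
 * performs, without the fractional-slot interpolation or the negative-series
 * handling of the real code. A trim of N percent drops roughly N percent of
 * the sorted series from each tail and averages the rest. Hypothetical
 * standalone code, not the netdata API.
 */
#include <stdio.h>
#include <stdlib.h>

static int cmp_double(const void *a, const void *b) {
    double x = *(const double *)a, y = *(const double *)b;
    return (x > y) - (x < y);
}

static double trimmed_mean(double *series, size_t n, double trim_percent) {
    qsort(series, n, sizeof(double), cmp_double);         // sort_series() in netdata
    double keep = 1.0 - (trim_percent / 100.0) * 2.0;     // same transform as _create_internal()
    size_t use = (size_t)((double)n * keep);
    if (!use) use = 1;
    size_t start = (n - use) / 2;                         // trim both tails equally
    double sum = 0.0;
    for (size_t i = start; i < start + use; i++) sum += series[i];
    return sum / (double)use;
}

int main(void) {
    double s[] = { 1, 2, 3, 4, 100 };                     // 100 is the outlier to trim
    // 25% trim each side keeps the middle 2 of 5: (2+3)/2 = 2.5
    printf("trimmed mean = %g\n", trimmed_mean(s, 5, 25.0));
    return 0;
}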
diff --git a/web/api/queries/weights.c b/web/api/queries/weights.c
index 485aaca26..0830a969a 100644
--- a/web/api/queries/weights.c
+++ b/web/api/queries/weights.c
@@ -24,10 +24,11 @@ static struct {
const char *name;
WEIGHTS_METHOD value;
} weights_methods[] = {
- { "ks2" , WEIGHTS_METHOD_MC_KS2}
- , { "volume" , WEIGHTS_METHOD_MC_VOLUME}
- , { "anomaly-rate" , WEIGHTS_METHOD_ANOMALY_RATE}
- , { NULL , 0 }
+ { "ks2" , WEIGHTS_METHOD_MC_KS2}
+ , { "volume" , WEIGHTS_METHOD_MC_VOLUME}
+ , { "anomaly-rate" , WEIGHTS_METHOD_ANOMALY_RATE}
+ , { "value" , WEIGHTS_METHOD_VALUE}
+ , { NULL , 0 }
};
WEIGHTS_METHOD weights_string_to_method(const char *method) {
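/*
 * The body of weights_string_to_method() falls outside this hunk. A lookup
 * over a NULL-terminated name/value table like the one above is conventionally
 * written as below; this is a sketch only, with an assumed fallback value, and
 * the real function may choose a different default.
 */
static WEIGHTS_METHOD lookup_method_sketch(const char *method) {
    for (size_t i = 0; weights_methods[i].name; i++)
        if (!strcmp(method, weights_methods[i].name))
            return weights_methods[i].value;
    return WEIGHTS_METHOD_MC_KS2;   // assumed default, hypothetical
}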
@@ -56,14 +57,18 @@ typedef enum {
struct register_result {
RESULT_FLAGS flags;
+ RRDHOST *host;
RRDCONTEXT_ACQUIRED *rca;
RRDINSTANCE_ACQUIRED *ria;
RRDMETRIC_ACQUIRED *rma;
NETDATA_DOUBLE value;
+ STORAGE_POINT highlighted;
+ STORAGE_POINT baseline;
+ usec_t duration_ut;
};
static DICTIONARY *register_result_init() {
- DICTIONARY *results = dictionary_create(DICT_OPTION_SINGLE_THREADED);
+ DICTIONARY *results = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct register_result));
return results;
}
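/*
 * With DICT_OPTION_FIXED_SIZE the dictionary copies every value into inline
 * storage of the declared size, so callers can hand it a stack struct and
 * forget it. Usage sketch, using the dictionary calls that appear in this
 * patch:
 *
 *     struct register_result t = { .value = 1.0 };
 *     dictionary_set(results, "some-key", &t, sizeof(t));   // value is copied
 *
 * register_result() below presumably relies on this: its local struct can go
 * out of scope right after the call.
 */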
@@ -71,14 +76,10 @@ static void register_result_destroy(DICTIONARY *results) {
dictionary_destroy(results);
}
-static void register_result(DICTIONARY *results,
- RRDCONTEXT_ACQUIRED *rca,
- RRDINSTANCE_ACQUIRED *ria,
- RRDMETRIC_ACQUIRED *rma,
- NETDATA_DOUBLE value,
- RESULT_FLAGS flags,
- WEIGHTS_STATS *stats,
- bool register_zero) {
+static void register_result(DICTIONARY *results, RRDHOST *host, RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria,
+ RRDMETRIC_ACQUIRED *rma, NETDATA_DOUBLE value, RESULT_FLAGS flags,
+ STORAGE_POINT *highlighted, STORAGE_POINT *baseline, WEIGHTS_STATS *stats,
+ bool register_zero, usec_t duration_ut) {
if(!netdata_double_isnumber(value)) return;
@@ -90,17 +91,25 @@ static void register_result(DICTIONARY *results,
return;
// keep track of the max of the baseline / highlight ratio
- if(flags & RESULT_IS_BASE_HIGH_RATIO && v > stats->max_base_high_ratio)
+ if((flags & RESULT_IS_BASE_HIGH_RATIO) && v > stats->max_base_high_ratio)
stats->max_base_high_ratio = v;
struct register_result t = {
.flags = flags,
+ .host = host,
.rca = rca,
.ria = ria,
.rma = rma,
- .value = v
+ .value = v,
+ .duration_ut = duration_ut,
};
+ if(highlighted)
+ t.highlighted = *highlighted;
+
+ if(baseline)
+ t.baseline = *baseline;
+
// we can use the pointer address or RMA as a unique key for each metric
char buf[20 + 1];
ssize_t len = snprintfz(buf, 20, "%p", rma);
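/*
 * The "%p" rendering of the RRDMETRIC_ACQUIRED pointer yields a key such as
 * "0x7f3a2c001230" that is unique per acquired metric and stable for the
 * lifetime of the query; the dictionary_set() call that consumes buf and len
 * presumably follows just past this hunk.
 */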
@@ -114,112 +123,92 @@ static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *w
time_t after, time_t before,
time_t baseline_after, time_t baseline_before,
size_t points, WEIGHTS_METHOD method,
- RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions __maybe_unused, usec_t duration,
WEIGHTS_STATS *stats) {
- buffer_sprintf(wb, "{\n"
- "\t\"after\": %lld,\n"
- "\t\"before\": %lld,\n"
- "\t\"duration\": %lld,\n"
- "\t\"points\": %zu,\n",
- (long long)after,
- (long long)before,
- (long long)(before - after),
- points
- );
-
- if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME)
- buffer_sprintf(wb, ""
- "\t\"baseline_after\": %lld,\n"
- "\t\"baseline_before\": %lld,\n"
- "\t\"baseline_duration\": %lld,\n"
- "\t\"baseline_points\": %zu,\n",
- (long long)baseline_after,
- (long long)baseline_before,
- (long long)(baseline_before - baseline_after),
- points << shifts
- );
-
- buffer_sprintf(wb, ""
- "\t\"statistics\": {\n"
- "\t\t\"query_time_ms\": %f,\n"
- "\t\t\"db_queries\": %zu,\n"
- "\t\t\"query_result_points\": %zu,\n"
- "\t\t\"binary_searches\": %zu,\n"
- "\t\t\"db_points_read\": %zu,\n"
- "\t\t\"db_points_per_tier\": [ ",
- (double)duration / (double)USEC_PER_MS,
- stats->db_queries,
- stats->result_points,
- stats->binary_searches,
- stats->db_points
- );
-
- for(size_t tier = 0; tier < storage_tiers ;tier++)
- buffer_sprintf(wb, "%s%zu", tier?", ":"", stats->db_points_per_tier[tier]);
-
- buffer_sprintf(wb, " ]\n"
- "\t},\n"
- "\t\"group\": \"%s\",\n"
- "\t\"method\": \"%s\",\n"
- "\t\"options\": \"",
- web_client_api_request_v1_data_group_to_string(group),
- weights_method_to_string(method)
- );
-
- web_client_api_request_v1_data_options_to_buffer(wb, options);
+ buffer_json_member_add_time_t(wb, "after", after);
+ buffer_json_member_add_time_t(wb, "before", before);
+ buffer_json_member_add_time_t(wb, "duration", before - after);
+ buffer_json_member_add_uint64(wb, "points", points);
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ buffer_json_member_add_time_t(wb, "baseline_after", baseline_after);
+ buffer_json_member_add_time_t(wb, "baseline_before", baseline_before);
+ buffer_json_member_add_time_t(wb, "baseline_duration", baseline_before - baseline_after);
+ buffer_json_member_add_uint64(wb, "baseline_points", points << shifts);
+ }
+
+ buffer_json_member_add_object(wb, "statistics");
+ {
+ buffer_json_member_add_double(wb, "query_time_ms", (double) duration / (double) USEC_PER_MS);
+ buffer_json_member_add_uint64(wb, "db_queries", stats->db_queries);
+ buffer_json_member_add_uint64(wb, "query_result_points", stats->result_points);
+ buffer_json_member_add_uint64(wb, "binary_searches", stats->binary_searches);
+ buffer_json_member_add_uint64(wb, "db_points_read", stats->db_points);
+
+ buffer_json_member_add_array(wb, "db_points_per_tier");
+ {
+ for (size_t tier = 0; tier < storage_tiers; tier++)
+ buffer_json_add_array_item_uint64(wb, stats->db_points_per_tier[tier]);
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_string(wb, "group", time_grouping_tostring(group));
+ buffer_json_member_add_string(wb, "method", weights_method_to_string(method));
+ web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
}
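/*
 * The hand-rolled buffer_sprintf() JSON above is replaced with netdata's
 * buffer_json_* builder, which tracks nesting, commas and quoting. A minimal
 * usage sketch, restricted to calls that appear in this patch:
 */
BUFFER *wb = buffer_create(0, NULL);
buffer_json_initialize(wb, "\"", "\"", 0, true, false);      // last arg: minify
buffer_json_member_add_object(wb, "statistics");
buffer_json_member_add_uint64(wb, "db_queries", 3);
buffer_json_member_add_array(wb, "db_points_per_tier");
buffer_json_add_array_item_uint64(wb, 120);
buffer_json_array_close(wb);
buffer_json_object_close(wb);
buffer_json_finalize(wb);
// buffer_tostring(wb) now yields:
// {"statistics":{"db_queries":3,"db_points_per_tier":[120]}}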
static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
time_t after, time_t before,
time_t baseline_after, time_t baseline_before,
size_t points, WEIGHTS_METHOD method,
- RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions, usec_t duration,
WEIGHTS_STATS *stats) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+
results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
points, method, group, options, shifts, examined_dimensions, duration, stats);
- buffer_strcat(wb, "\",\n\t\"correlated_charts\": {\n");
+ buffer_json_member_add_object(wb, "correlated_charts");
- size_t charts = 0, chart_dims = 0, total_dimensions = 0;
+ size_t charts = 0, total_dimensions = 0;
struct register_result *t;
RRDINSTANCE_ACQUIRED *last_ria = NULL; // never access this - we use it only for comparison
dfe_start_read(results, t) {
if(t->ria != last_ria) {
last_ria = t->ria;
- if(charts) buffer_strcat(wb, "\n\t\t\t}\n\t\t},\n");
- buffer_strcat(wb, "\t\t\"");
- buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
- buffer_strcat(wb, "\": {\n");
- buffer_strcat(wb, "\t\t\t\"context\": \"");
- buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
- buffer_strcat(wb, "\",\n\t\t\t\"dimensions\": {\n");
+ if(charts) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // chart:id
+ }
+
+ buffer_json_member_add_object(wb, rrdinstance_acquired_id(t->ria));
+ buffer_json_member_add_string(wb, "context", rrdcontext_acquired_id(t->rca));
+ buffer_json_member_add_object(wb, "dimensions");
charts++;
- chart_dims = 0;
}
- if (chart_dims) buffer_sprintf(wb, ",\n");
- buffer_sprintf(wb, "\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
- chart_dims++;
+ buffer_json_member_add_double(wb, rrdmetric_acquired_name(t->rma), t->value);
total_dimensions++;
}
dfe_done(t);
// close dimensions and chart
- if (total_dimensions)
- buffer_strcat(wb, "\n\t\t\t}\n\t\t}\n");
-
- // close correlated_charts
- buffer_sprintf(wb, "\t},\n"
- "\t\"correlated_dimensions\": %zu,\n"
- "\t\"total_dimensions_count\": %zu\n"
- "}\n",
- total_dimensions,
- examined_dimensions
- );
+ if (total_dimensions) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // chart:id
+ }
+
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+ buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+ buffer_json_finalize(wb);
return total_dimensions;
}
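/*
 * The emitted document keeps the same shape as the old sprintf version,
 * roughly (values illustrative):
 *
 * {
 *   "after": ..., "before": ..., "group": "average", ...,
 *   "correlated_charts": {
 *     "chart_id": {
 *       "context": "ctx.id",
 *       "dimensions": { "dim_name": 0.93, ... }
 *     }, ...
 *   },
 *   "correlated_dimensions": 5,
 *   "total_dimensions_count": 120
 * }
 */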
@@ -228,14 +217,16 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
time_t after, time_t before,
time_t baseline_after, time_t baseline_before,
size_t points, WEIGHTS_METHOD method,
- RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions, usec_t duration,
WEIGHTS_STATS *stats) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+
results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
points, method, group, options, shifts, examined_dimensions, duration, stats);
- buffer_strcat(wb, "\",\n\t\"contexts\": {\n");
+ buffer_json_member_add_object(wb, "contexts");
size_t contexts = 0, charts = 0, total_dimensions = 0, context_dims = 0, chart_dims = 0;
NETDATA_DOUBLE contexts_total_weight = 0.0, charts_total_weight = 0.0;
@@ -247,18 +238,17 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
if(t->rca != last_rca) {
last_rca = t->rca;
- if(contexts)
- buffer_sprintf(wb, "\n"
- "\t\t\t\t\t},\n"
- "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t\t\t}\n\t\t\t},\n"
- "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t},\n"
- , charts_total_weight / (double)chart_dims
- , contexts_total_weight / (double)context_dims);
+ if(contexts) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_double(wb, "weight", charts_total_weight / (double) chart_dims);
+ buffer_json_object_close(wb); // chart:id
+ buffer_json_object_close(wb); // charts
+ buffer_json_member_add_double(wb, "weight", contexts_total_weight / (double) context_dims);
+ buffer_json_object_close(wb); // context
+ }
- buffer_strcat(wb, "\t\t\"");
- buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
- buffer_strcat(wb, "\": {\n\t\t\t\"charts\":{\n");
+ buffer_json_member_add_object(wb, rrdcontext_acquired_id(t->rca));
+ buffer_json_member_add_object(wb, "charts");
contexts++;
charts = 0;
@@ -271,25 +261,21 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
if(t->ria != last_ria) {
last_ria = t->ria;
- if(charts)
- buffer_sprintf(wb, "\n"
- "\t\t\t\t\t},\n"
- "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t\t\t},\n"
- , charts_total_weight / (double)chart_dims);
+ if(charts) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_double(wb, "weight", charts_total_weight / (double) chart_dims);
+ buffer_json_object_close(wb); // chart:id
+ }
- buffer_strcat(wb, "\t\t\t\t\"");
- buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
- buffer_strcat(wb, "\": {\n");
- buffer_strcat(wb, "\t\t\t\t\t\"dimensions\": {\n");
+ buffer_json_member_add_object(wb, rrdinstance_acquired_id(t->ria));
+ buffer_json_member_add_object(wb, "dimensions");
charts++;
chart_dims = 0;
charts_total_weight = 0.0;
}
- if (chart_dims) buffer_sprintf(wb, ",\n");
- buffer_sprintf(wb, "\t\t\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
+ buffer_json_member_add_double(wb, rrdmetric_acquired_name(t->rma), t->value);
charts_total_weight += t->value;
contexts_total_weight += t->value;
chart_dims++;
@@ -299,25 +285,794 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
dfe_done(t);
// close dimensions and chart
- if (total_dimensions)
- buffer_sprintf(wb, "\n"
- "\t\t\t\t\t},\n"
- "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t\t\t}\n"
- "\t\t\t},\n"
- "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t}\n"
- , charts_total_weight / (double)chart_dims
- , contexts_total_weight / (double)context_dims);
-
- // close correlated_charts
- buffer_sprintf(wb, "\t},\n"
- "\t\"weighted_dimensions\": %zu,\n"
- "\t\"total_dimensions_count\": %zu\n"
- "}\n",
- total_dimensions,
- examined_dimensions
- );
+ if (total_dimensions) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_double(wb, "weight", charts_total_weight / (double) chart_dims);
+ buffer_json_object_close(wb); // chart:id
+ buffer_json_object_close(wb); // charts
+ buffer_json_member_add_double(wb, "weight", contexts_total_weight / (double) context_dims);
+ buffer_json_object_close(wb); // context
+ }
+
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+ buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+ buffer_json_finalize(wb);
+
+ return total_dimensions;
+}
+
+struct query_weights_data {
+ QUERY_WEIGHTS_REQUEST *qwr;
+
+ SIMPLE_PATTERN *scope_nodes_sp;
+ SIMPLE_PATTERN *scope_contexts_sp;
+ SIMPLE_PATTERN *nodes_sp;
+ SIMPLE_PATTERN *contexts_sp;
+ SIMPLE_PATTERN *instances_sp;
+ SIMPLE_PATTERN *dimensions_sp;
+ SIMPLE_PATTERN *labels_sp;
+ SIMPLE_PATTERN *alerts_sp;
+
+ usec_t timeout_us;
+ bool timed_out;
+ bool interrupted;
+
+ struct query_timings timings;
+
+ size_t examined_dimensions;
+ bool register_zero;
+
+ DICTIONARY *results;
+ WEIGHTS_STATS stats;
+
+ uint32_t shifts;
+
+ struct query_versions versions;
+};
+
+#define AGGREGATED_WEIGHT_EMPTY (struct aggregated_weight) { \
+ .min = NAN, \
+ .max = NAN, \
+ .sum = NAN, \
+ .count = 0, \
+ .hsp = STORAGE_POINT_UNSET, \
+ .bsp = STORAGE_POINT_UNSET, \
+}
+
+#define merge_into_aw(aw, t) do { \
+ if(!(aw).count) { \
+ (aw).count = 1; \
+ (aw).min = (aw).max = (aw).sum = (t)->value; \
+ (aw).hsp = (t)->highlighted; \
+ if(baseline) \
+ (aw).bsp = (t)->baseline; \
+ } \
+ else { \
+ (aw).count++; \
+ (aw).sum += (t)->value; \
+ if((t)->value < (aw).min) \
+ (aw).min = (t)->value; \
+ if((t)->value > (aw).max) \
+ (aw).max = (t)->value; \
+ storage_point_merge_to((aw).hsp, (t)->highlighted); \
+ if(baseline) \
+ storage_point_merge_to((aw).bsp, (t)->baseline); \
+ } \
+} while(0)
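/*
 * Note the hidden dependency: merge_into_aw() reads a local variable named
 * `baseline` (a bool) from the caller's scope to decide whether baseline
 * points are merged. A first merge into an AGGREGATED_WEIGHT_EMPTY slot
 * initializes min/max/sum from the point; later merges fold it in:
 *
 *     bool baseline = true;                               // required in scope
 *     struct aggregated_weight aw = AGGREGATED_WEIGHT_EMPTY;
 *     merge_into_aw(aw, t1);   // aw.count == 1, min == max == sum == t1->value
 *     merge_into_aw(aw, t2);   // running min/max/sum over both points
 */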
+
+static void results_header_to_json_v2(DICTIONARY *results __maybe_unused, BUFFER *wb, struct query_weights_data *qwd,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions __maybe_unused, usec_t duration __maybe_unused,
+ WEIGHTS_STATS *stats, bool group_by) {
+
+ buffer_json_member_add_object(wb, "request");
+ buffer_json_member_add_string(wb, "method", weights_method_to_string(method));
+ web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+
+ buffer_json_member_add_object(wb, "scope");
+ buffer_json_member_add_string(wb, "scope_nodes", qwd->qwr->scope_nodes ? qwd->qwr->scope_nodes : "*");
+ buffer_json_member_add_string(wb, "scope_contexts", qwd->qwr->scope_contexts ? qwd->qwr->scope_contexts : "*");
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "selectors");
+ buffer_json_member_add_string(wb, "nodes", qwd->qwr->nodes ? qwd->qwr->nodes : "*");
+ buffer_json_member_add_string(wb, "contexts", qwd->qwr->contexts ? qwd->qwr->contexts : "*");
+ buffer_json_member_add_string(wb, "instances", qwd->qwr->instances ? qwd->qwr->instances : "*");
+ buffer_json_member_add_string(wb, "dimensions", qwd->qwr->dimensions ? qwd->qwr->dimensions : "*");
+ buffer_json_member_add_string(wb, "labels", qwd->qwr->labels ? qwd->qwr->labels : "*");
+ buffer_json_member_add_string(wb, "alerts", qwd->qwr->alerts ? qwd->qwr->alerts : "*");
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "window");
+ buffer_json_member_add_time_t(wb, "after", qwd->qwr->after);
+ buffer_json_member_add_time_t(wb, "before", qwd->qwr->before);
+ buffer_json_member_add_uint64(wb, "points", qwd->qwr->points);
+ if(qwd->qwr->options & RRDR_OPTION_SELECTED_TIER)
+ buffer_json_member_add_uint64(wb, "tier", qwd->qwr->tier);
+ else
+ buffer_json_member_add_string(wb, "tier", NULL);
+ buffer_json_object_close(wb);
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ buffer_json_member_add_object(wb, "baseline");
+ buffer_json_member_add_time_t(wb, "baseline_after", qwd->qwr->baseline_after);
+ buffer_json_member_add_time_t(wb, "baseline_before", qwd->qwr->baseline_before);
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_member_add_object(wb, "aggregations");
+ buffer_json_member_add_object(wb, "time");
+ buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(qwd->qwr->time_group_method));
+ buffer_json_member_add_string(wb, "time_group_options", qwd->qwr->time_group_options);
+ buffer_json_object_close(wb); // time
+
+ buffer_json_member_add_array(wb, "metrics");
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_array(wb, "group_by");
+ buffer_json_group_by_to_array(wb, qwd->qwr->group_by.group_by);
+ buffer_json_array_close(wb);
+
+// buffer_json_member_add_array(wb, "group_by_label");
+// buffer_json_array_close(wb);
+
+ buffer_json_member_add_string(wb, "aggregation", group_by_aggregate_function_to_string(qwd->qwr->group_by.aggregation));
+ }
+ buffer_json_object_close(wb); // 1st group by
+ buffer_json_array_close(wb); // array
+ buffer_json_object_close(wb); // aggregations
+
+ buffer_json_member_add_uint64(wb, "timeout", qwd->qwr->timeout_ms);
+ buffer_json_object_close(wb); // request
+
+ buffer_json_member_add_object(wb, "view");
+ buffer_json_member_add_string(wb, "format", (group_by)?"grouped":"full");
+ buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(group));
+
+ buffer_json_member_add_object(wb, "window");
+ buffer_json_member_add_time_t(wb, "after", after);
+ buffer_json_member_add_time_t(wb, "before", before);
+ buffer_json_member_add_time_t(wb, "duration", before - after);
+ buffer_json_member_add_uint64(wb, "points", points);
+ buffer_json_object_close(wb);
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ buffer_json_member_add_object(wb, "baseline");
+ buffer_json_member_add_time_t(wb, "after", baseline_after);
+ buffer_json_member_add_time_t(wb, "before", baseline_before);
+ buffer_json_member_add_time_t(wb, "duration", baseline_before - baseline_after);
+ buffer_json_member_add_uint64(wb, "points", points << shifts);
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_object_close(wb); // view
+
+ buffer_json_member_add_object(wb, "db");
+ {
+ buffer_json_member_add_uint64(wb, "db_queries", stats->db_queries);
+ buffer_json_member_add_uint64(wb, "query_result_points", stats->result_points);
+ buffer_json_member_add_uint64(wb, "binary_searches", stats->binary_searches);
+ buffer_json_member_add_uint64(wb, "db_points_read", stats->db_points);
+
+ buffer_json_member_add_array(wb, "db_points_per_tier");
+ {
+ for (size_t tier = 0; tier < storage_tiers; tier++)
+ buffer_json_add_array_item_uint64(wb, stats->db_points_per_tier[tier]);
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb); // db
+}
+
+typedef enum {
+ WPT_DIMENSION = 0,
+ WPT_INSTANCE = 1,
+ WPT_CONTEXT = 2,
+ WPT_NODE = 3,
+ WPT_GROUP = 4,
+} WEIGHTS_POINT_TYPE;
+
+struct aggregated_weight {
+ const char *name;
+ NETDATA_DOUBLE min;
+ NETDATA_DOUBLE max;
+ NETDATA_DOUBLE sum;
+ size_t count;
+ STORAGE_POINT hsp;
+ STORAGE_POINT bsp;
+};
+
+static inline void storage_point_to_json(BUFFER *wb, WEIGHTS_POINT_TYPE type, ssize_t di, ssize_t ii, ssize_t ci, ssize_t ni, struct aggregated_weight *aw, RRDR_OPTIONS options __maybe_unused, bool baseline) {
+ if(type != WPT_GROUP) {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_uint64(wb, type); // "type"
+ buffer_json_add_array_item_int64(wb, ni);
+ if (type != WPT_NODE) {
+ buffer_json_add_array_item_int64(wb, ci);
+ if (type != WPT_CONTEXT) {
+ buffer_json_add_array_item_int64(wb, ii);
+ if (type != WPT_INSTANCE)
+ buffer_json_add_array_item_int64(wb, di);
+ else
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+ else {
+ buffer_json_add_array_item_string(wb, NULL);
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+ }
+ else {
+ buffer_json_add_array_item_string(wb, NULL);
+ buffer_json_add_array_item_string(wb, NULL);
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+ buffer_json_add_array_item_double(wb, (aw->count) ? aw->sum / (NETDATA_DOUBLE)aw->count : 0.0); // "weight"
+ }
+ else {
+ buffer_json_member_add_array(wb, "v");
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_double(wb, aw->min); // "min"
+ buffer_json_add_array_item_double(wb, (aw->count) ? aw->sum / (NETDATA_DOUBLE)aw->count : 0.0); // "avg"
+ buffer_json_add_array_item_double(wb, aw->max); // "max"
+ buffer_json_add_array_item_double(wb, aw->sum); // "sum"
+ buffer_json_add_array_item_uint64(wb, aw->count); // "count"
+ buffer_json_array_close(wb);
+ }
+
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_double(wb, aw->hsp.min); // "min"
+ buffer_json_add_array_item_double(wb, (aw->hsp.count) ? aw->hsp.sum / (NETDATA_DOUBLE) aw->hsp.count : 0.0); // "avg"
+ buffer_json_add_array_item_double(wb, aw->hsp.max); // "max"
+ buffer_json_add_array_item_double(wb, aw->hsp.sum); // "sum"
+ buffer_json_add_array_item_uint64(wb, aw->hsp.count); // "count"
+ buffer_json_add_array_item_uint64(wb, aw->hsp.anomaly_count); // "anomaly_count"
+ buffer_json_array_close(wb);
+
+ if(baseline) {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_double(wb, aw->bsp.min); // "min"
+ buffer_json_add_array_item_double(wb, (aw->bsp.count) ? aw->bsp.sum / (NETDATA_DOUBLE) aw->bsp.count : 0.0); // "avg"
+ buffer_json_add_array_item_double(wb, aw->bsp.max); // "max"
+ buffer_json_add_array_item_double(wb, aw->bsp.sum); // "sum"
+ buffer_json_add_array_item_uint64(wb, aw->bsp.count); // "count"
+ buffer_json_add_array_item_uint64(wb, aw->bsp.anomaly_count); // "anomaly_count"
+ buffer_json_array_close(wb);
+ }
+
+ buffer_json_array_close(wb);
+}
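/*
 * For the non-group-by row types the function above emits a flat array,
 * roughly:
 *
 *   [ row_type, ni, ci, ii, di, weight,
 *     [min, avg, max, sum, count, anomaly_count],      // highlighted window
 *     [min, avg, max, sum, count, anomaly_count] ]     // baseline (ks2/volume only)
 *
 * with nulls in the index slots that do not apply to the row type (e.g. a
 * WPT_CONTEXT row carries no instance or dimension index). WPT_GROUP rows
 * instead emit a "v" member holding [min, avg, max, sum, count].
 */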
+
+static void multinode_data_schema(BUFFER *wb, RRDR_OPTIONS options __maybe_unused, const char *key, bool baseline, bool group_by) {
+ buffer_json_member_add_object(wb, key); // schema
+
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "items");
+
+ if(group_by) {
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "weight");
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "labels");
+ {
+ buffer_json_add_array_item_string(wb, "min");
+ buffer_json_add_array_item_string(wb, "avg");
+ buffer_json_add_array_item_string(wb, "max");
+ buffer_json_add_array_item_string(wb, "sum");
+ buffer_json_add_array_item_string(wb, "count");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ else {
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "name", "row_type");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_array(wb, "value");
+ buffer_json_add_array_item_string(wb, "dimension");
+ buffer_json_add_array_item_string(wb, "instance");
+ buffer_json_add_array_item_string(wb, "context");
+ buffer_json_add_array_item_string(wb, "node");
+ buffer_json_array_close(wb);
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "ni");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "nodes");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "ci");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "contexts");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "ii");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "instances");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "di");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "dimensions");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "weight");
+ buffer_json_member_add_string(wb, "type", "number");
+ }
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "timeframe");
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "labels");
+ {
+ buffer_json_add_array_item_string(wb, "min");
+ buffer_json_add_array_item_string(wb, "avg");
+ buffer_json_add_array_item_string(wb, "max");
+ buffer_json_add_array_item_string(wb, "sum");
+ buffer_json_add_array_item_string(wb, "count");
+ buffer_json_add_array_item_string(wb, "anomaly_count");
+ }
+ buffer_json_array_close(wb);
+ buffer_json_member_add_object(wb, "calculations");
+ buffer_json_member_add_string(wb, "anomaly rate", "anomaly_count * 100 / count");
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ if(baseline) {
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "baseline timeframe");
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "labels");
+ {
+ buffer_json_add_array_item_string(wb, "min");
+ buffer_json_add_array_item_string(wb, "avg");
+ buffer_json_add_array_item_string(wb, "max");
+ buffer_json_add_array_item_string(wb, "sum");
+ buffer_json_add_array_item_string(wb, "count");
+ buffer_json_add_array_item_string(wb, "anomaly_count");
+ }
+ buffer_json_array_close(wb);
+ buffer_json_member_add_object(wb, "calculations");
+ buffer_json_member_add_string(wb, "anomaly rate", "anomaly_count * 100 / count");
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_array_close(wb); // items
+ buffer_json_object_close(wb); // schema
+}
+
+struct dict_unique_node {
+ bool existing;
+ bool exposed;
+ uint32_t i;
+ RRDHOST *host;
+ usec_t duration_ut;
+};
+
+struct dict_unique_name_units {
+ bool existing;
+ bool exposed;
+ uint32_t i;
+ const char *units;
+};
+
+struct dict_unique_id_name {
+ bool existing;
+ bool exposed;
+ uint32_t i;
+ const char *id;
+ const char *name;
+};
+
+static inline struct dict_unique_node *dict_unique_node_add(DICTIONARY *dict, RRDHOST *host, ssize_t *max_id) {
+ struct dict_unique_node *dun = dictionary_set(dict, host->machine_guid, NULL, sizeof(struct dict_unique_node));
+ if(!dun->existing) {
+ dun->existing = true;
+ dun->host = host;
+ dun->i = *max_id;
+ (*max_id)++;
+ }
+
+ return dun;
+}
+
+static inline struct dict_unique_name_units *dict_unique_name_units_add(DICTIONARY *dict, const char *name, const char *units, ssize_t *max_id) {
+ struct dict_unique_name_units *dun = dictionary_set(dict, name, NULL, sizeof(struct dict_unique_name_units));
+ if(!dun->existing) {
+ dun->units = units;
+ dun->existing = true;
+ dun->i = *max_id;
+ (*max_id)++;
+ }
+
+ return dun;
+}
+
+static inline struct dict_unique_id_name *dict_unique_id_name_add(DICTIONARY *dict, const char *id, const char *name, ssize_t *max_id) {
+ char key[1024 + 1];
+ snprintfz(key, 1024, "%s:%s", id, name);
+ struct dict_unique_id_name *dun = dictionary_set(dict, key, NULL, sizeof(struct dict_unique_id_name));
+ if(!dun->existing) {
+ dun->existing = true;
+ dun->i = *max_id;
+ (*max_id)++;
+ dun->id = id;
+ dun->name = name;
+ }
+
+ return dun;
+}
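/*
 * These three helpers implement the same interning pattern: the first time a
 * key is seen, the entry takes the next free index (*max_id), so result rows
 * can refer to nodes, contexts, instances and dimensions by small integers
 * that the "dictionaries" section at the end of the payload resolves.
 *
 *     ssize_t ni_max = 0;
 *     struct dict_unique_node *n1 = dict_unique_node_add(dict, host_a, &ni_max);
 *     struct dict_unique_node *n2 = dict_unique_node_add(dict, host_a, &ni_max);
 *     // n1 == n2, n1->i == 0, ni_max == 1  (the second call finds the entry)
 */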
+
+static size_t registered_results_to_json_multinode_no_group_by(
+ DICTIONARY *results, BUFFER *wb,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, struct query_weights_data *qwd,
+ WEIGHTS_STATS *stats,
+ struct query_versions *versions) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+ buffer_json_member_add_uint64(wb, "api", 2);
+
+ results_header_to_json_v2(results, wb, qwd, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions,
+ qwd->timings.executed_ut - qwd->timings.received_ut, stats, false);
+
+ version_hashes_api_v2(wb, versions);
+
+ bool baseline = method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME;
+ multinode_data_schema(wb, options, "schema", baseline, false);
+
+ DICTIONARY *dict_nodes = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_node));
+ DICTIONARY *dict_contexts = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_name_units));
+ DICTIONARY *dict_instances = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_id_name));
+ DICTIONARY *dict_dimensions = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_id_name));
+
+ buffer_json_member_add_array(wb, "result");
+
+ struct aggregated_weight node_aw = AGGREGATED_WEIGHT_EMPTY, context_aw = AGGREGATED_WEIGHT_EMPTY, instance_aw = AGGREGATED_WEIGHT_EMPTY;
+ struct register_result *t;
+ RRDHOST *last_host = NULL;
+ RRDCONTEXT_ACQUIRED *last_rca = NULL;
+ RRDINSTANCE_ACQUIRED *last_ria = NULL;
+ struct dict_unique_name_units *context_dun = NULL;
+ struct dict_unique_node *node_dun = NULL;
+ struct dict_unique_id_name *instance_dun = NULL;
+ struct dict_unique_id_name *dimension_dun = NULL;
+ ssize_t di = -1, ii = -1, ci = -1, ni = -1;
+ ssize_t di_max = 0, ii_max = 0, ci_max = 0, ni_max = 0;
+ size_t total_dimensions = 0;
+ dfe_start_read(results, t) {
+
+ // close instance
+ if(t->ria != last_ria && last_ria) {
+ storage_point_to_json(wb, WPT_INSTANCE, di, ii, ci, ni, &instance_aw, options, baseline);
+ instance_dun->exposed = true;
+ last_ria = NULL;
+ instance_aw = AGGREGATED_WEIGHT_EMPTY;
+ }
+
+ // close context
+ if(t->rca != last_rca && last_rca) {
+ storage_point_to_json(wb, WPT_CONTEXT, di, ii, ci, ni, &context_aw, options, baseline);
+ context_dun->exposed = true;
+ last_rca = NULL;
+ context_aw = AGGREGATED_WEIGHT_EMPTY;
+ }
+
+ // close node
+ if(t->host != last_host && last_host) {
+ storage_point_to_json(wb, WPT_NODE, di, ii, ci, ni, &node_aw, options, baseline);
+ node_dun->exposed = true;
+ last_host = NULL;
+ node_aw = AGGREGATED_WEIGHT_EMPTY;
+ }
+
+ // open node
+ if(t->host != last_host) {
+ last_host = t->host;
+ node_dun = dict_unique_node_add(dict_nodes, t->host, &ni_max);
+ ni = node_dun->i;
+ }
+
+ // open context
+ if(t->rca != last_rca) {
+ last_rca = t->rca;
+ context_dun = dict_unique_name_units_add(dict_contexts, rrdcontext_acquired_id(t->rca),
+ rrdcontext_acquired_units(t->rca), &ci_max);
+ ci = context_dun->i;
+ }
+
+ // open instance
+ if(t->ria != last_ria) {
+ last_ria = t->ria;
+ instance_dun = dict_unique_id_name_add(dict_instances, rrdinstance_acquired_id(t->ria), rrdinstance_acquired_name(t->ria), &ii_max);
+ ii = instance_dun->i;
+ }
+
+ dimension_dun = dict_unique_id_name_add(dict_dimensions, rrdmetric_acquired_id(t->rma), rrdmetric_acquired_name(t->rma), &di_max);
+ di = dimension_dun->i;
+
+ struct aggregated_weight aw = {
+ .min = t->value,
+ .max = t->value,
+ .sum = t->value,
+ .count = 1,
+ .hsp = t->highlighted,
+ .bsp = t->baseline,
+ };
+
+ storage_point_to_json(wb, WPT_DIMENSION, di, ii, ci, ni, &aw, options, baseline);
+ node_dun->exposed = true;
+ context_dun->exposed = true;
+ instance_dun->exposed = true;
+ dimension_dun->exposed = true;
+
+ merge_into_aw(instance_aw, t);
+ merge_into_aw(context_aw, t);
+ merge_into_aw(node_aw, t);
+
+ node_dun->duration_ut += t->duration_ut;
+ total_dimensions++;
+ }
+ dfe_done(t);
+
+ // close instance
+ if(last_ria) {
+ storage_point_to_json(wb, WPT_INSTANCE, di, ii, ci, ni, &instance_aw, options, baseline);
+ instance_dun->exposed = true;
+ }
+
+ // close context
+ if(last_rca) {
+ storage_point_to_json(wb, WPT_CONTEXT, di, ii, ci, ni, &context_aw, options, baseline);
+ context_dun->exposed = true;
+ }
+
+ // close node
+ if(last_host) {
+ storage_point_to_json(wb, WPT_NODE, di, ii, ci, ni, &node_aw, options, baseline);
+ node_dun->exposed = true;
+ }
+
+ buffer_json_array_close(wb); // result
+
+ buffer_json_member_add_object(wb, "dictionaries");
+ buffer_json_member_add_array(wb, "nodes");
+ {
+ struct dict_unique_node *dun;
+ dfe_start_read(dict_nodes, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_node_add_v2(wb, dun->host, dun->i, dun->duration_ut);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "contexts");
+ {
+ struct dict_unique_name_units *dun;
+ dfe_start_read(dict_contexts, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", dun_dfe.name);
+ buffer_json_member_add_string(wb, "units", dun->units);
+ buffer_json_member_add_int64(wb, "ci", dun->i);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "instances");
+ {
+ struct dict_unique_id_name *dun;
+ dfe_start_read(dict_instances, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", dun->id);
+ if(dun->id != dun->name)
+ buffer_json_member_add_string(wb, "nm", dun->name);
+ buffer_json_member_add_int64(wb, "ii", dun->i);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "dimensions");
+ {
+ struct dict_unique_id_name *dun;
+ dfe_start_read(dict_dimensions, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", dun->id);
+ if(dun->id != dun->name)
+ buffer_json_member_add_string(wb, "nm", dun->name);
+ buffer_json_member_add_int64(wb, "di", dun->i);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_object_close(wb); // dictionaries
+
+ buffer_json_agents_array_v2(wb, &qwd->timings, 0);
+ buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+ buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+ buffer_json_finalize(wb);
+
+ dictionary_destroy(dict_nodes);
+ dictionary_destroy(dict_contexts);
+ dictionary_destroy(dict_instances);
+ dictionary_destroy(dict_dimensions);
+
+ return total_dimensions;
+}
+
+static size_t registered_results_to_json_multinode_group_by(
+ DICTIONARY *results, BUFFER *wb,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, struct query_weights_data *qwd,
+ WEIGHTS_STATS *stats,
+ struct query_versions *versions) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+ buffer_json_member_add_uint64(wb, "api", 2);
+
+ results_header_to_json_v2(results, wb, qwd, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions,
+ qwd->timings.executed_ut - qwd->timings.received_ut, stats, true);
+
+ version_hashes_api_v2(wb, versions);
+
+ bool baseline = method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME;
+ multinode_data_schema(wb, options, "v_schema", baseline, true);
+
+ DICTIONARY *group_by = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
+ NULL, sizeof(struct aggregated_weight));
+
+ struct register_result *t;
+ size_t total_dimensions = 0;
+ BUFFER *key = buffer_create(0, NULL);
+ BUFFER *name = buffer_create(0, NULL);
+ dfe_start_read(results, t) {
+
+ buffer_flush(key);
+ buffer_flush(name);
+
+ if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_DIMENSION) {
+ buffer_strcat(key, rrdmetric_acquired_name(t->rma));
+ buffer_strcat(name, rrdmetric_acquired_name(t->rma));
+ }
+ if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_INSTANCE) {
+ if(buffer_strlen(key)) {
+ buffer_fast_strcat(key, ",", 1);
+ buffer_fast_strcat(name, ",", 1);
+ }
+
+ buffer_strcat(key, rrdinstance_acquired_id(t->ria));
+ buffer_strcat(name, rrdinstance_acquired_name(t->ria));
+
+ if(!(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_NODE)) {
+ buffer_fast_strcat(key, "@", 1);
+ buffer_fast_strcat(name, "@", 1);
+ buffer_strcat(key, t->host->machine_guid);
+ buffer_strcat(name, rrdhost_hostname(t->host));
+ }
+ }
+ if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_NODE) {
+ if(buffer_strlen(key)) {
+ buffer_fast_strcat(key, ",", 1);
+ buffer_fast_strcat(name, ",", 1);
+ }
+
+ buffer_strcat(key, t->host->machine_guid);
+ buffer_strcat(name, rrdhost_hostname(t->host));
+ }
+ if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_CONTEXT) {
+ if(buffer_strlen(key)) {
+ buffer_fast_strcat(key, ",", 1);
+ buffer_fast_strcat(name, ",", 1);
+ }
+
+ buffer_strcat(key, rrdcontext_acquired_id(t->rca));
+ buffer_strcat(name, rrdcontext_acquired_id(t->rca));
+ }
+ if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_UNITS) {
+ if(buffer_strlen(key)) {
+ buffer_fast_strcat(key, ",", 1);
+ buffer_fast_strcat(name, ",", 1);
+ }
+
+ buffer_strcat(key, rrdcontext_acquired_units(t->rca));
+ buffer_strcat(name, rrdcontext_acquired_units(t->rca));
+ }
+
+ struct aggregated_weight *aw = dictionary_set(group_by, buffer_tostring(key), NULL, sizeof(struct aggregated_weight));
+ if(!aw->name) {
+ aw->name = strdupz(buffer_tostring(name));
+ aw->min = aw->max = aw->sum = t->value;
+ aw->count = 1;
+ aw->hsp = t->highlighted;
+ aw->bsp = t->baseline;
+ }
+ else
+ merge_into_aw(*aw, t);
+
+ total_dimensions++;
+ }
+ dfe_done(t);
+ buffer_free(key); key = NULL;
+ buffer_free(name); name = NULL;
+
+ struct aggregated_weight *aw;
+ buffer_json_member_add_array(wb, "result");
+ dfe_start_read(group_by, aw) {
+ const char *k = aw_dfe.name;
+ const char *n = aw->name;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", k);
+
+ if(strcmp(k, n) != 0)
+ buffer_json_member_add_string(wb, "nm", n);
+
+ storage_point_to_json(wb, WPT_GROUP, 0, 0, 0, 0, aw, options, baseline);
+ buffer_json_object_close(wb);
+
+ freez((void *)aw->name);
+ }
+ dfe_done(aw);
+ buffer_json_array_close(wb); // result
+
+ buffer_json_agents_array_v2(wb, &qwd->timings, 0);
+ buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+ buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+ buffer_json_finalize(wb);
+
+ dictionary_destroy(group_by);
return total_dimensions;
}
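/*
 * The group-by key/name pair above is composed from the selected
 * RRDR_GROUP_BY_* facets in a fixed order (dimension, instance[@node], node,
 * context, units), comma-separated. E.g. grouping by dimension and instance
 * without node produces pairs like (guid and hostname illustrative):
 *
 *   key:  "user,cpu.cpu0@1f9a...-machine-guid"
 *   name: "user,cpu0@myhost"
 *
 * so identical labels on different nodes stay distinct while the display
 * name remains human-readable.
 */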
@@ -500,14 +1255,16 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
ONEWAYALLOC *owa, RRDHOST *host,
RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
time_t after, time_t before, size_t points, RRDR_OPTIONS options,
- RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ RRDR_TIME_GROUPING time_group_method, const char *time_group_options, size_t tier,
WEIGHTS_STATS *stats,
- size_t *entries
+ size_t *entries,
+ STORAGE_POINT *sp
) {
NETDATA_DOUBLE *ret = NULL;
QUERY_TARGET_REQUEST qtr = {
+ .version = 1,
.host = host,
.rca = rca,
.ria = ria,
@@ -516,25 +1273,27 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
.before = before,
.points = points,
.options = options,
- .group_method = group_method,
- .group_options = group_options,
+ .time_group_method = time_group_method,
+ .time_group_options = time_group_options,
.tier = tier,
.query_source = QUERY_SOURCE_API_WEIGHTS,
- .priority = STORAGE_PRIORITY_NORMAL,
+ .priority = STORAGE_PRIORITY_SYNCHRONOUS,
};
- RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+ QUERY_TARGET *qt = query_target_create(&qtr);
+ RRDR *r = rrd2rrdr(owa, qt);
if(!r)
goto cleanup;
stats->db_queries++;
- stats->result_points += r->internal.result_points_generated;
- stats->db_points += r->internal.db_points_read;
+ stats->result_points += r->stats.result_points_generated;
+ stats->db_points += r->stats.db_points_read;
for(size_t tr = 0; tr < storage_tiers ; tr++)
- stats->db_points_per_tier[tr] += r->internal.tier_points_read[tr];
+ stats->db_points_per_tier[tr] += r->internal.qt->db.tiers[tr].points;
- if(r->d != 1) {
- error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu", r->internal.qt->id, r->d);
+ if(r->d != 1 || r->internal.qt->query.used != 1) {
+ error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu r->d and %zu qt->query.used",
+ r->internal.qt->id, r->d, (size_t)r->internal.qt->query.used);
goto cleanup;
}
@@ -553,6 +1312,9 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
*entries = rrdr_rows(r);
ret = onewayalloc_mallocz(owa, sizeof(NETDATA_DOUBLE) * rrdr_rows(r));
+ if(sp)
+ *sp = r->internal.qt->query.array[0].query_points;
+
// copy the points of the dimension to a contiguous array
// there is no need to check for empty values, since empty values are already zero
// https://github.com/netdata/netdata/blob/6e3144683a73a2024d51425b20ecfd569034c858/web/api/queries/average/average.c#L41-L43
@@ -560,6 +1322,7 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
cleanup:
rrdr_free(owa, r);
+ query_target_release(qt);
return ret;
}
@@ -570,27 +1333,30 @@ static void rrdset_metric_correlations_ks2(
time_t baseline_after, time_t baseline_before,
time_t after, time_t before,
size_t points, RRDR_OPTIONS options,
- RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ RRDR_TIME_GROUPING time_group_method, const char *time_group_options, size_t tier,
uint32_t shifts,
WEIGHTS_STATS *stats, bool register_zero
) {
options |= RRDR_OPTION_NATURAL_POINTS;
+ usec_t started_ut = now_monotonic_usec();
ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
size_t high_points = 0;
+ STORAGE_POINT highlighted_sp;
NETDATA_DOUBLE *highlight = rrd2rrdr_ks2(
owa, host, rca, ria, rma, after, before, points,
- options, group_method, group_options, tier, stats, &high_points);
+ options, time_group_method, time_group_options, tier, stats, &high_points, &highlighted_sp);
if(!highlight)
goto cleanup;
size_t base_points = 0;
+ STORAGE_POINT baseline_sp;
NETDATA_DOUBLE *baseline = rrd2rrdr_ks2(
owa, host, rca, ria, rma, baseline_after, baseline_before, high_points << shifts,
- options, group_method, group_options, tier, stats, &base_points);
+ options, time_group_method, time_group_options, tier, stats, &base_points, &baseline_sp);
if(!baseline)
goto cleanup;
@@ -610,9 +1376,12 @@ static void rrdset_metric_correlations_ks2(
prob = 1.0;
}
+ usec_t ended_ut = now_monotonic_usec();
+
// to spread the results evenly, 0.0 needs to be the less correlated and 1.0 the most correlated
// so, we flip the result of kstwo()
- register_result(results, rca, ria, rma, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, stats, register_zero);
+ register_result(results, host, rca, ria, rma, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, &highlighted_sp,
+ &baseline_sp, stats, register_zero, ended_ut - started_ut);
}
cleanup:
@@ -622,8 +1391,8 @@ cleanup:
// ----------------------------------------------------------------------------
// VOLUME algorithm functions
-static void merge_query_value_to_stats(QUERY_VALUE *qv, WEIGHTS_STATS *stats) {
- stats->db_queries++;
+static void merge_query_value_to_stats(QUERY_VALUE *qv, WEIGHTS_STATS *stats, size_t queries) {
+ stats->db_queries += queries;
stats->result_points += qv->result_points;
stats->db_points += qv->points_read;
for(size_t tier = 0; tier < storage_tiers ; tier++)
@@ -636,16 +1405,16 @@ static void rrdset_metric_correlations_volume(
DICTIONARY *results,
time_t baseline_after, time_t baseline_before,
time_t after, time_t before,
- RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ RRDR_OPTIONS options, RRDR_TIME_GROUPING time_group_method, const char *time_group_options,
size_t tier,
WEIGHTS_STATS *stats, bool register_zero) {
options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ABSOLUTE | RRDR_OPTION_NATURAL_POINTS;
QUERY_VALUE baseline_average = rrdmetric2value(host, rca, ria, rma, baseline_after, baseline_before,
- options, group_method, group_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
- merge_query_value_to_stats(&baseline_average, stats);
+ options, time_group_method, time_group_options, tier, 0,
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
+ merge_query_value_to_stats(&baseline_average, stats, 1);
if(!netdata_double_isnumber(baseline_average.value)) {
// this means no data for the baseline window, but we may have data for the highlighted one - assume zero
@@ -653,9 +1422,9 @@ static void rrdset_metric_correlations_volume(
}
QUERY_VALUE highlight_average = rrdmetric2value(host, rca, ria, rma, after, before,
- options, group_method, group_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
- merge_query_value_to_stats(&highlight_average, stats);
+ options, time_group_method, time_group_options, tier, 0,
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
+ merge_query_value_to_stats(&highlight_average, stats, 1);
if(!netdata_double_isnumber(highlight_average.value))
return;
@@ -665,12 +1434,17 @@ static void rrdset_metric_correlations_volume(
return;
}
+ if((options & RRDR_OPTION_ANOMALY_BIT) && highlight_average.value < baseline_average.value) {
+ // when working on anomaly bits, we are looking for an increase in the anomaly rate
+ return;
+ }
+
char highlight_countif_options[50 + 1];
snprintfz(highlight_countif_options, 50, "%s" NETDATA_DOUBLE_FORMAT, highlight_average.value < baseline_average.value ? "<" : ">", baseline_average.value);
QUERY_VALUE highlight_countif = rrdmetric2value(host, rca, ria, rma, after, before,
options, RRDR_GROUPING_COUNTIF, highlight_countif_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
- merge_query_value_to_stats(&highlight_countif, stats);
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
+ merge_query_value_to_stats(&highlight_countif, stats, 1);
if(!netdata_double_isnumber(highlight_countif.value)) {
info("WEIGHTS: highlighted countif query failed, but highlighted average worked - strange...");
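
For the volume method, the countif re-query turns the baseline average into a comparison expression ("<X" or ">X"), so the result is the share of highlight points beyond the baseline level. A standalone sketch of how that option string is composed, assuming NETDATA_DOUBLE_FORMAT is a printf-style format for doubles (the real macro may differ):

    #include <stdio.h>

    #define NETDATA_DOUBLE_FORMAT "%0.7f" // assumption, for illustration only

    int main(void) {
        double baseline_average = 12.5;
        double highlight_average = 20.1;

        char countif_options[50 + 1];
        snprintf(countif_options, sizeof(countif_options),
                 "%s" NETDATA_DOUBLE_FORMAT,
                 highlight_average < baseline_average ? "<" : ">",
                 baseline_average);

        // the countif grouping then counts highlight points matching this condition
        printf("countif expression: %s\n", countif_options); // >12.5000000
        return 0;
    }
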
@@ -693,31 +1467,104 @@ static void rrdset_metric_correlations_volume(
pcent = highlight_countif.value;
}
- register_result(results, rca, ria, rma, pcent, flags, stats, register_zero);
+ register_result(results, host, rca, ria, rma, pcent, flags, &highlight_average.sp, &baseline_average.sp, stats,
+ register_zero, baseline_average.duration_ut + highlight_average.duration_ut + highlight_countif.duration_ut);
}
// ----------------------------------------------------------------------------
-// ANOMALY RATE algorithm functions
+// VALUE / ANOMALY RATE algorithm functions
-static void rrdset_weights_anomaly_rate(
+static void rrdset_weights_value(
RRDHOST *host,
RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
DICTIONARY *results,
time_t after, time_t before,
- RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ RRDR_OPTIONS options, RRDR_TIME_GROUPING time_group_method, const char *time_group_options,
size_t tier,
WEIGHTS_STATS *stats, bool register_zero) {
- options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ANOMALY_BIT | RRDR_OPTION_NATURAL_POINTS;
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_NATURAL_POINTS;
QUERY_VALUE qv = rrdmetric2value(host, rca, ria, rma, after, before,
- options, group_method, group_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
+ options, time_group_method, time_group_options, tier, 0,
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
- merge_query_value_to_stats(&qv, stats);
+ merge_query_value_to_stats(&qv, stats, 1);
if(netdata_double_isnumber(qv.value))
- register_result(results, rca, ria, rma, qv.value, 0, stats, register_zero);
+ register_result(results, host, rca, ria, rma, qv.value, 0, &qv.sp, NULL, stats, register_zero, qv.duration_ut);
+}
+
+static void rrdset_weights_multi_dimensional_value(struct query_weights_data *qwd) {
+ QUERY_TARGET_REQUEST qtr = {
+ .version = 1,
+ .scope_nodes = qwd->qwr->scope_nodes,
+ .scope_contexts = qwd->qwr->scope_contexts,
+ .nodes = qwd->qwr->nodes,
+ .contexts = qwd->qwr->contexts,
+ .instances = qwd->qwr->instances,
+ .dimensions = qwd->qwr->dimensions,
+ .labels = qwd->qwr->labels,
+ .alerts = qwd->qwr->alerts,
+ .after = qwd->qwr->after,
+ .before = qwd->qwr->before,
+ .points = 1,
+ .options = qwd->qwr->options | RRDR_OPTION_NATURAL_POINTS,
+ .time_group_method = qwd->qwr->time_group_method,
+ .time_group_options = qwd->qwr->time_group_options,
+ .tier = qwd->qwr->tier,
+ .timeout_ms = qwd->qwr->timeout_ms,
+ .query_source = QUERY_SOURCE_API_WEIGHTS,
+ .priority = STORAGE_PRIORITY_NORMAL,
+ };
+
+ ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
+ QUERY_TARGET *qt = query_target_create(&qtr);
+ RRDR *r = rrd2rrdr(owa, qt);
+
+ if(!r || rrdr_rows(r) != 1 || !r->d || r->d != r->internal.qt->query.used)
+ goto cleanup;
+
+ QUERY_VALUE qv = {
+ .after = r->view.after,
+ .before = r->view.before,
+ .points_read = r->stats.db_points_read,
+ .result_points = r->stats.result_points_generated,
+ };
+
+ size_t queries = 0;
+ for(size_t d = 0; d < r->d ;d++) {
+ if(!rrdr_dimension_should_be_exposed(r->od[d], qwd->qwr->options))
+ continue;
+
+ long i = 0; // only one row
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
+
+ qv.value = cn[d];
+ qv.anomaly_rate = ar[d];
+ storage_point_merge_to(qv.sp, r->internal.qt->query.array[d].query_points);
+
+ if(netdata_double_isnumber(qv.value)) {
+ QUERY_METRIC *qm = query_metric(r->internal.qt, d);
+ QUERY_DIMENSION *qd = query_dimension(r->internal.qt, qm->link.query_dimension_id);
+ QUERY_INSTANCE *qi = query_instance(r->internal.qt, qm->link.query_instance_id);
+ QUERY_CONTEXT *qc = query_context(r->internal.qt, qm->link.query_context_id);
+ QUERY_NODE *qn = query_node(r->internal.qt, qm->link.query_node_id);
+
+ register_result(qwd->results, qn->rrdhost, qc->rca, qi->ria, qd->rma, qv.value, 0, &qv.sp,
+ NULL, &qwd->stats, qwd->register_zero, qm->duration_ut);
+ }
+
+ queries++;
+ }
+
+ merge_query_value_to_stats(&qv, &qwd->stats, queries);
+
+cleanup:
+ rrdr_free(owa, r);
+ query_target_release(qt);
+ onewayalloc_destroy(owa);
}
// ----------------------------------------------------------------------------
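
rrdset_weights_multi_dimensional_value() above asks for exactly one output row (points = 1) across all matching dimensions and then walks that row. The RRDR value matrix is laid out row-major with one slot per dimension per row, which is why the code computes &r->v[i * r->d] and then indexes cn[d]. A self-contained sketch of that addressing with a hypothetical 1-row, 3-dimension result:

    #include <stdio.h>

    int main(void) {
        // hypothetical RRDR-like layout: rows x dimensions, row-major
        const long rows = 1, dims = 3;
        double v[1 * 3] = { 4.0, 7.5, 0.25 }; // one value per dimension

        for(long i = 0; i < rows; i++) {
            const double *cn = &v[i * dims]; // start of row i
            for(long d = 0; d < dims; d++)
                printf("row %ld, dimension %ld -> %g\n", i, d, cn[d]);
        }
        return 0;
    }
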
@@ -765,13 +1612,15 @@ static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
NETDATA_DOUBLE slots[dimensions];
dimensions = 0;
dfe_start_read(results, t) {
- if(t->flags & (RESULT_IS_PERCENTAGE_OF_TIME))
+ if(t->flags & RESULT_IS_PERCENTAGE_OF_TIME)
t->value = t->value * stats->max_base_high_ratio;
slots[dimensions++] = t->value;
}
dfe_done(t);
+ if(!dimensions) return 0; // Coverity fix
+
// sort the array with the values of all dimensions
qsort(slots, dimensions, sizeof(NETDATA_DOUBLE), compare_netdata_doubles);
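
spread_results_evenly() is essentially a rank transform: it gathers every weight into slots[], sorts the array, and then reassigns each result a value derived from its position, so the final weights spread across (0, 1] instead of clustering. A self-contained sketch of the idea (the even-spacing formula here is illustrative, not Netdata's exact one):

    #include <stdio.h>
    #include <stdlib.h>

    static int compare_doubles(const void *a, const void *b) {
        const double da = *(const double *)a, db = *(const double *)b;
        return (da > db) - (da < db);
    }

    int main(void) {
        double slots[] = { 0.91, 0.02, 0.89, 0.90 }; // clustered raw weights
        const size_t n = sizeof(slots) / sizeof(slots[0]);

        qsort(slots, n, sizeof(double), compare_doubles);

        // replace each value with its rank, spread evenly over (0, 1]
        for(size_t i = 0; i < n; i++)
            printf("%g -> %g\n", slots[i], (double)(i + 1) / (double)n);

        return 0;
    }
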
@@ -805,60 +1654,184 @@ static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
// ----------------------------------------------------------------------------
// The main function
-int web_api_v1_weights(
- RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
- RRDR_GROUPING group, const char *group_options,
- time_t baseline_after, time_t baseline_before,
- time_t after, time_t before,
- size_t points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, size_t tier, size_t timeout) {
+static ssize_t weights_for_rrdmetric(void *data, RRDHOST *host, RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma) {
+ struct query_weights_data *qwd = data;
+ QUERY_WEIGHTS_REQUEST *qwr = qwd->qwr;
+
+ if(qwd->qwr->interrupt_callback && qwd->qwr->interrupt_callback(qwd->qwr->interrupt_callback_data)) {
+ qwd->interrupted = true;
+ return -1;
+ }
+
+ qwd->examined_dimensions++;
+
+ switch(qwr->method) {
+ case WEIGHTS_METHOD_VALUE:
+ rrdset_weights_value(
+ host, rca, ria, rma,
+ qwd->results,
+ qwr->after, qwr->before,
+ qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier,
+ &qwd->stats, qwd->register_zero
+ );
+ break;
+
+ case WEIGHTS_METHOD_ANOMALY_RATE:
+ qwr->options |= RRDR_OPTION_ANOMALY_BIT;
+ rrdset_weights_value(
+ host, rca, ria, rma,
+ qwd->results,
+ qwr->after, qwr->before,
+ qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier,
+ &qwd->stats, qwd->register_zero
+ );
+ break;
+
+ case WEIGHTS_METHOD_MC_VOLUME:
+ rrdset_metric_correlations_volume(
+ host, rca, ria, rma,
+ qwd->results,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->after, qwr->before,
+ qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier,
+ &qwd->stats, qwd->register_zero
+ );
+ break;
+
+ default:
+ case WEIGHTS_METHOD_MC_KS2:
+ rrdset_metric_correlations_ks2(
+ host, rca, ria, rma,
+ qwd->results,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->after, qwr->before, qwr->points,
+ qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier, qwd->shifts,
+ &qwd->stats, qwd->register_zero
+ );
+ break;
+ }
+
+ qwd->timings.executed_ut = now_monotonic_usec();
+ if(qwd->timings.executed_ut - qwd->timings.received_ut > qwd->timeout_us) {
+ qwd->timed_out = true;
+ return -1;
+ }
+
+ return 1;
+}
+
+static ssize_t weights_do_context_callback(void *data, RRDCONTEXT_ACQUIRED *rca, bool queryable_context) {
+ if(!queryable_context)
+        return 0;
+
+ struct query_weights_data *qwd = data;
+
+ bool has_retention = false;
+ switch(qwd->qwr->method) {
+ case WEIGHTS_METHOD_VALUE:
+ case WEIGHTS_METHOD_ANOMALY_RATE:
+ has_retention = rrdcontext_retention_match(rca, qwd->qwr->after, qwd->qwr->before);
+ break;
+
+ case WEIGHTS_METHOD_MC_KS2:
+ case WEIGHTS_METHOD_MC_VOLUME:
+ has_retention = rrdcontext_retention_match(rca, qwd->qwr->after, qwd->qwr->before);
+ if(has_retention)
+ has_retention = rrdcontext_retention_match(rca, qwd->qwr->baseline_after, qwd->qwr->baseline_before);
+ break;
+ }
+
+ if(!has_retention)
+ return 0;
+
+ ssize_t ret = weights_foreach_rrdmetric_in_context(rca,
+ qwd->instances_sp,
+ NULL,
+ qwd->labels_sp,
+ qwd->alerts_sp,
+ qwd->dimensions_sp,
+ true, true, qwd->qwr->version,
+ weights_for_rrdmetric, qwd);
+ return ret;
+}
+
+ssize_t weights_do_node_callback(void *data, RRDHOST *host, bool queryable) {
+ if(!queryable)
+ return 0;
- WEIGHTS_STATS stats = {};
+ struct query_weights_data *qwd = data;
+
+ ssize_t ret = query_scope_foreach_context(host, qwd->qwr->scope_contexts,
+ qwd->scope_contexts_sp, qwd->contexts_sp,
+ weights_do_context_callback, queryable, qwd);
+
+ return ret;
+}
+
+int web_api_v12_weights(BUFFER *wb, QUERY_WEIGHTS_REQUEST *qwr) {
- DICTIONARY *results = register_result_init();
- DICTIONARY *metrics = NULL;
char *error = NULL;
int resp = HTTP_RESP_OK;
// if the user didn't give a timeout
    // assume 5 minutes
- if(!timeout)
- timeout = 60 * MSEC_PER_SEC;
+ if(!qwr->timeout_ms)
+ qwr->timeout_ms = 5 * 60 * MSEC_PER_SEC;
// if the timeout is less than 1 second
// make it at least 1 second
- if(timeout < (long)(1 * MSEC_PER_SEC))
- timeout = 1 * MSEC_PER_SEC;
-
- usec_t timeout_usec = timeout * USEC_PER_MS;
- usec_t started_usec = now_realtime_usec();
+ if(qwr->timeout_ms < (long)(1 * MSEC_PER_SEC))
+ qwr->timeout_ms = 1 * MSEC_PER_SEC;
+
+ struct query_weights_data qwd = {
+ .qwr = qwr,
+
+ .scope_nodes_sp = string_to_simple_pattern(qwr->scope_nodes),
+ .scope_contexts_sp = string_to_simple_pattern(qwr->scope_contexts),
+ .nodes_sp = string_to_simple_pattern(qwr->nodes),
+ .contexts_sp = string_to_simple_pattern(qwr->contexts),
+ .instances_sp = string_to_simple_pattern(qwr->instances),
+ .dimensions_sp = string_to_simple_pattern(qwr->dimensions),
+ .labels_sp = string_to_simple_pattern(qwr->labels),
+ .alerts_sp = string_to_simple_pattern(qwr->alerts),
+ .timeout_us = qwr->timeout_ms * USEC_PER_MS,
+ .timed_out = false,
+ .examined_dimensions = 0,
+ .register_zero = true,
+ .results = register_result_init(),
+ .stats = {},
+ .shifts = 0,
+ .timings = {
+ .received_ut = now_monotonic_usec(),
+ }
+ };
- if(!rrdr_relative_window_to_absolute(&after, &before))
+ if(!rrdr_relative_window_to_absolute(&qwr->after, &qwr->before, NULL))
buffer_no_cacheable(wb);
- if (before <= after) {
+ if (qwr->before <= qwr->after) {
resp = HTTP_RESP_BAD_REQUEST;
error = "Invalid selected time-range.";
goto cleanup;
}
- uint32_t shifts = 0;
- if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
- if(!points) points = 500;
+ if(qwr->method == WEIGHTS_METHOD_MC_KS2 || qwr->method == WEIGHTS_METHOD_MC_VOLUME) {
+ if(!qwr->points) qwr->points = 500;
- if(baseline_before <= API_RELATIVE_TIME_MAX)
- baseline_before += after;
+ if(qwr->baseline_before <= API_RELATIVE_TIME_MAX)
+ qwr->baseline_before += qwr->after;
- rrdr_relative_window_to_absolute(&baseline_after, &baseline_before);
+ rrdr_relative_window_to_absolute(&qwr->baseline_after, &qwr->baseline_before, NULL);
- if (baseline_before <= baseline_after) {
+ if (qwr->baseline_before <= qwr->baseline_after) {
resp = HTTP_RESP_BAD_REQUEST;
error = "Invalid baseline time-range.";
goto cleanup;
}
// baseline should be a power of two multiple of highlight
- long long base_delta = baseline_before - baseline_after;
- long long high_delta = before - after;
+ long long base_delta = qwr->baseline_before - qwr->baseline_after;
+ long long high_delta = qwr->before - qwr->after;
uint32_t multiplier = (uint32_t)round((double)base_delta / (double)high_delta);
// check if the multiplier is a power of two
@@ -880,138 +1853,146 @@ int web_api_v1_weights(
    // convert the multiplier to the number of shifts
    // we need to do, to divide baseline numbers to match
    // the highlight ones
while(multiplier > 1) {
- shifts++;
+ qwd.shifts++;
multiplier = multiplier >> 1;
}
    // if the baseline size will not comply with MAX_POINTS
// lower the window of the baseline
- while(shifts && (points << shifts) > MAX_POINTS)
- shifts--;
+ while(qwd.shifts && (qwr->points << qwd.shifts) > MAX_POINTS)
+ qwd.shifts--;
    // if the baseline size still does not comply with MAX_POINTS
// lower the resolution of the highlight and the baseline
- while((points << shifts) > MAX_POINTS)
- points = points >> 1;
+ while((qwr->points << qwd.shifts) > MAX_POINTS)
+ qwr->points = qwr->points >> 1;
- if(points < 15) {
+ if(qwr->points < 15) {
resp = HTTP_RESP_BAD_REQUEST;
error = "Too few points available, at least 15 are needed.";
goto cleanup;
}
// adjust the baseline to be multiplier times bigger than the highlight
- baseline_after = baseline_before - (high_delta << shifts);
+ qwr->baseline_after = qwr->baseline_before - (high_delta << qwd.shifts);
}
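
Worked through with concrete (illustrative) numbers: a 1-hour highlight window and a requested ~4-hour baseline give multiplier 4, i.e. two shifts, and the baseline is then re-anchored to span exactly high_delta << 2 seconds before baseline_before:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        long long high_delta = 3600;  // 1h highlight window
        long long base_delta = 14520; // ~4h baseline, as requested by the caller

        uint32_t multiplier = (uint32_t)round((double)base_delta / (double)high_delta); // 4

        // convert the multiplier to shifts: 4 -> 2
        uint32_t shifts = 0;
        for(uint32_t m = multiplier; m > 1; m >>= 1)
            shifts++;

        // re-anchor the baseline to be exactly (multiplier x highlight) long
        long long baseline_before = 1700000000;
        long long baseline_after  = baseline_before - (high_delta << shifts);

        printf("multiplier=%u shifts=%u baseline spans %lld seconds\n",
               multiplier, shifts, baseline_before - baseline_after); // 14400
        return 0;
    }
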
- size_t examined_dimensions = 0;
+ if(qwr->options & RRDR_OPTION_NONZERO) {
+ qwd.register_zero = false;
- bool register_zero = true;
- if(options & RRDR_OPTION_NONZERO) {
- register_zero = false;
- options &= ~RRDR_OPTION_NONZERO;
+ // remove it to run the queries without it
+ qwr->options &= ~RRDR_OPTION_NONZERO;
}
- metrics = rrdcontext_all_metrics_to_dict(host, contexts);
- struct metric_entry *me;
-
- // for every metric_entry in the dictionary
- dfe_start_read(metrics, me) {
- usec_t now_usec = now_realtime_usec();
- if(now_usec - started_usec > timeout_usec) {
- error = "timed out";
- resp = HTTP_RESP_GATEWAY_TIMEOUT;
- goto cleanup;
+ if(qwr->host && qwr->version == 1)
+ weights_do_node_callback(&qwd, qwr->host, true);
+ else {
+ if((qwd.qwr->method == WEIGHTS_METHOD_VALUE || qwd.qwr->method == WEIGHTS_METHOD_ANOMALY_RATE) && (qwd.contexts_sp || qwd.scope_contexts_sp)) {
+ rrdset_weights_multi_dimensional_value(&qwd);
}
-
- examined_dimensions++;
-
- switch(method) {
- case WEIGHTS_METHOD_ANOMALY_RATE:
- options |= RRDR_OPTION_ANOMALY_BIT;
- rrdset_weights_anomaly_rate(
- host,
- me->rca, me->ria, me->rma,
- results,
- after, before,
- options, group, group_options, tier,
- &stats, register_zero
- );
- break;
-
- case WEIGHTS_METHOD_MC_VOLUME:
- rrdset_metric_correlations_volume(
- host,
- me->rca, me->ria, me->rma,
- results,
- baseline_after, baseline_before,
- after, before,
- options, group, group_options, tier,
- &stats, register_zero
- );
- break;
-
- default:
- case WEIGHTS_METHOD_MC_KS2:
- rrdset_metric_correlations_ks2(
- host,
- me->rca, me->ria, me->rma,
- results,
- baseline_after, baseline_before,
- after, before, points,
- options, group, group_options, tier, shifts,
- &stats, register_zero
- );
- break;
+ else {
+ query_scope_foreach_host(qwd.scope_nodes_sp, qwd.nodes_sp,
+ weights_do_node_callback, &qwd,
+ &qwd.versions,
+ NULL);
}
}
- dfe_done(me);
- if(!register_zero)
- options |= RRDR_OPTION_NONZERO;
+ if(!qwd.register_zero) {
+ // put it back, to show it in the response
+ qwr->options |= RRDR_OPTION_NONZERO;
+ }
+
+ if(qwd.timed_out) {
+ error = "timed out";
+ resp = HTTP_RESP_GATEWAY_TIMEOUT;
+ goto cleanup;
+ }
+
+ if(qwd.interrupted) {
+ error = "interrupted";
+ resp = HTTP_RESP_BACKEND_FETCH_FAILED;
+ goto cleanup;
+ }
+
+ if(!qwd.register_zero)
+ qwr->options |= RRDR_OPTION_NONZERO;
- if(!(options & RRDR_OPTION_RETURN_RAW))
- spread_results_evenly(results, &stats);
+ if(!(qwr->options & RRDR_OPTION_RETURN_RAW) && qwr->method != WEIGHTS_METHOD_VALUE)
+ spread_results_evenly(qwd.results, &qwd.stats);
- usec_t ended_usec = now_realtime_usec();
+ usec_t ended_usec = qwd.timings.executed_ut = now_monotonic_usec();
// generate the json output we need
buffer_flush(wb);
size_t added_dimensions = 0;
- switch(format) {
+ switch(qwr->format) {
case WEIGHTS_FORMAT_CHARTS:
added_dimensions =
registered_results_to_json_charts(
- results, wb,
- after, before,
- baseline_after, baseline_before,
- points, method, group, options, shifts,
- examined_dimensions,
- ended_usec - started_usec, &stats);
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ ended_usec - qwd.timings.received_ut, &qwd.stats);
break;
- default:
case WEIGHTS_FORMAT_CONTEXTS:
added_dimensions =
registered_results_to_json_contexts(
- results, wb,
- after, before,
- baseline_after, baseline_before,
- points, method, group, options, shifts,
- examined_dimensions,
- ended_usec - started_usec, &stats);
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ ended_usec - qwd.timings.received_ut, &qwd.stats);
+ break;
+
+ default:
+ case WEIGHTS_FORMAT_MULTINODE:
+ // we don't support these groupings in weights
+ qwr->group_by.group_by &= ~(RRDR_GROUP_BY_LABEL|RRDR_GROUP_BY_SELECTED|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE);
+ if(qwr->group_by.group_by == RRDR_GROUP_BY_NONE) {
+ added_dimensions =
+ registered_results_to_json_multinode_no_group_by(
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ &qwd, &qwd.stats, &qwd.versions);
+ }
+ else {
+ added_dimensions =
+ registered_results_to_json_multinode_group_by(
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ &qwd, &qwd.stats, &qwd.versions);
+ }
break;
}
- if(!added_dimensions) {
+ if(!added_dimensions && qwr->version < 2) {
error = "no results produced.";
resp = HTTP_RESP_NOT_FOUND;
}
cleanup:
- if(metrics) dictionary_destroy(metrics);
- if(results) register_result_destroy(results);
+ simple_pattern_free(qwd.scope_nodes_sp);
+ simple_pattern_free(qwd.scope_contexts_sp);
+ simple_pattern_free(qwd.nodes_sp);
+ simple_pattern_free(qwd.contexts_sp);
+ simple_pattern_free(qwd.instances_sp);
+ simple_pattern_free(qwd.dimensions_sp);
+ simple_pattern_free(qwd.labels_sp);
+ simple_pattern_free(qwd.alerts_sp);
+
+ register_result_destroy(qwd.results);
if(error) {
buffer_flush(wb);
diff --git a/web/api/queries/weights.h b/web/api/queries/weights.h
index 50d8634ef..66bea6ab2 100644
--- a/web/api/queries/weights.h
+++ b/web/api/queries/weights.h
@@ -9,22 +9,57 @@ typedef enum {
WEIGHTS_METHOD_MC_KS2 = 1,
WEIGHTS_METHOD_MC_VOLUME = 2,
WEIGHTS_METHOD_ANOMALY_RATE = 3,
+ WEIGHTS_METHOD_VALUE = 4,
} WEIGHTS_METHOD;
typedef enum {
WEIGHTS_FORMAT_CHARTS = 1,
WEIGHTS_FORMAT_CONTEXTS = 2,
+ WEIGHTS_FORMAT_MULTINODE = 3,
} WEIGHTS_FORMAT;
extern int enable_metric_correlations;
extern int metric_correlations_version;
extern WEIGHTS_METHOD default_metric_correlations_method;
-int web_api_v1_weights (RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
- RRDR_GROUPING group, const char *group_options,
- time_t baseline_after, time_t baseline_before,
- time_t after, time_t before,
- size_t points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, size_t tier, size_t timeout);
+typedef bool (*weights_interrupt_callback_t)(void *data);
+
+typedef struct query_weights_request {
+ size_t version;
+ RRDHOST *host;
+ const char *scope_nodes;
+ const char *scope_contexts;
+ const char *nodes;
+ const char *contexts;
+ const char *instances;
+ const char *dimensions;
+ const char *labels;
+ const char *alerts;
+
+ struct {
+ RRDR_GROUP_BY group_by;
+ char *group_by_label;
+ RRDR_GROUP_BY_FUNCTION aggregation;
+ } group_by;
+
+ WEIGHTS_METHOD method;
+ WEIGHTS_FORMAT format;
+ RRDR_TIME_GROUPING time_group_method;
+ const char *time_group_options;
+ time_t baseline_after;
+ time_t baseline_before;
+ time_t after;
+ time_t before;
+ size_t points;
+ RRDR_OPTIONS options;
+ size_t tier;
+ time_t timeout_ms;
+
+ weights_interrupt_callback_t interrupt_callback;
+ void *interrupt_callback_data;
+} QUERY_WEIGHTS_REQUEST;
+
+int web_api_v12_weights(BUFFER *wb, QUERY_WEIGHTS_REQUEST *qwr);
WEIGHTS_METHOD weights_string_to_method(const char *method);
const char *weights_method_to_string(WEIGHTS_METHOD method);
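
The new entry point replaces the long v1 parameter list with a single request struct. A sketch of how a caller might fill it - not standalone, since it assumes Netdata's internal headers, a prepared BUFFER *wb, and illustrative relative time values:

    QUERY_WEIGHTS_REQUEST qwr = {
        .version           = 2,
        .host              = NULL,                 // v2 selects nodes by pattern instead
        .scope_contexts    = "system.cpu",
        .method            = WEIGHTS_METHOD_MC_KS2,
        .format            = WEIGHTS_FORMAT_MULTINODE,
        .time_group_method = RRDR_GROUPING_AVERAGE,
        .baseline_after    = -360,                 // relative: the 5 minutes before ...
        .baseline_before   = -60,
        .after             = -60,                  // ... the highlighted last minute
        .before            = 0,
        .points            = 500,
        .timeout_ms        = 60 * MSEC_PER_SEC,
    };

    int code = web_api_v12_weights(wb, &qwr); // fills wb with the JSON response
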
diff --git a/web/api/web_api.c b/web/api/web_api.c
new file mode 100644
index 000000000..7c1d0fa09
--- /dev/null
+++ b/web/api/web_api.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_api.h"
+
+int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_path_endpoint, struct web_api_command *api_commands) {
+ if(unlikely(!url_path_endpoint || !*url_path_endpoint)) {
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data, "Which API command?");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ uint32_t hash = simple_hash(url_path_endpoint);
+
+ for(int i = 0; api_commands[i].command ; i++) {
+ if(unlikely(hash == api_commands[i].hash && !strcmp(url_path_endpoint, api_commands[i].command))) {
+ if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl))
+ return web_client_permission_denied(w);
+
+ char *query_string = (char *)buffer_tostring(w->url_query_string_decoded);
+
+ if(*query_string == '?')
+ query_string = &query_string[1];
+
+ return api_commands[i].callback(host, w, query_string);
+ }
+ }
+
+ buffer_flush(w->response.data);
+ buffer_strcat(w->response.data, "Unsupported API command: ");
+ buffer_strcat_htmlescape(w->response.data, url_path_endpoint);
+ return HTTP_RESP_NOT_FOUND;
+}
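
web_client_api_request_vX() rejects most candidates with an integer hash compare before falling back to strcmp(), so endpoint lookup stays cheap without a real hash table. A sketch of how such a command table is prepared (the endpoint names and callbacks here are hypothetical):

    static int api_cmd_hello(RRDHOST *host, struct web_client *w, char *url);
    static int api_cmd_bye(RRDHOST *host, struct web_client *w, char *url);

    static struct web_api_command my_api_commands[] = {
        { .command = "hello", .hash = 0, .acl = WEB_CLIENT_ACL_DASHBOARD, .callback = api_cmd_hello },
        { .command = "bye",   .hash = 0, .acl = WEB_CLIENT_ACL_DASHBOARD, .callback = api_cmd_bye   },
        { .command = NULL,    .hash = 0, .acl = 0,                        .callback = NULL          }, // terminator
    };

    // precompute the hashes once at startup
    static void my_api_commands_init(void) {
        for(int i = 0; my_api_commands[i].command; i++)
            my_api_commands[i].hash = simple_hash(my_api_commands[i].command);
    }
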
+
+RRDCONTEXT_TO_JSON_OPTIONS rrdcontext_to_json_parse_options(char *o) {
+ RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
+ char *tok;
+
+ while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) {
+ if(!*tok) continue;
+
+ if(!strcmp(tok, "full") || !strcmp(tok, "all"))
+ options |= RRDCONTEXT_OPTIONS_ALL;
+ else if(!strcmp(tok, "charts") || !strcmp(tok, "instances"))
+ options |= RRDCONTEXT_OPTION_SHOW_INSTANCES;
+ else if(!strcmp(tok, "dimensions") || !strcmp(tok, "metrics"))
+ options |= RRDCONTEXT_OPTION_SHOW_METRICS;
+ else if(!strcmp(tok, "queue"))
+ options |= RRDCONTEXT_OPTION_SHOW_QUEUED;
+ else if(!strcmp(tok, "flags"))
+ options |= RRDCONTEXT_OPTION_SHOW_FLAGS;
+ else if(!strcmp(tok, "uuids"))
+ options |= RRDCONTEXT_OPTION_SHOW_UUIDS;
+ else if(!strcmp(tok, "deleted"))
+ options |= RRDCONTEXT_OPTION_SHOW_DELETED;
+ else if(!strcmp(tok, "labels"))
+ options |= RRDCONTEXT_OPTION_SHOW_LABELS;
+ else if(!strcmp(tok, "deepscan"))
+ options |= RRDCONTEXT_OPTION_DEEPSCAN;
+ else if(!strcmp(tok, "hidden"))
+ options |= RRDCONTEXT_OPTION_SHOW_HIDDEN;
+ }
+
+ return options;
+}
+
+int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format, size_t api_version) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ time_t baseline_after = 0, baseline_before = 0, after = 0, before = 0;
+ size_t points = 0;
+ RRDR_OPTIONS options = 0;
+ RRDR_TIME_GROUPING time_group_method = RRDR_GROUPING_AVERAGE;
+ time_t timeout_ms = 0;
+ size_t tier = 0;
+ const char *time_group_options = NULL, *scope_contexts = NULL, *scope_nodes = NULL, *contexts = NULL, *nodes = NULL,
+ *instances = NULL, *dimensions = NULL, *labels = NULL, *alerts = NULL;
+
+ struct group_by_pass group_by = {
+ .group_by = RRDR_GROUP_BY_NONE,
+ .group_by_label = NULL,
+ .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE,
+ };
+
+ while (url) {
+ char *value = strsep_skip_consecutive_separators(&url, "&");
+ if (!value || !*value)
+ continue;
+
+ char *name = strsep_skip_consecutive_separators(&value, "=");
+ if (!name || !*name)
+ continue;
+ if (!value || !*value)
+ continue;
+
+ if (!strcmp(name, "baseline_after"))
+ baseline_after = str2l(value);
+
+ else if (!strcmp(name, "baseline_before"))
+ baseline_before = str2l(value);
+
+ else if (!strcmp(name, "after") || !strcmp(name, "highlight_after"))
+ after = str2l(value);
+
+ else if (!strcmp(name, "before") || !strcmp(name, "highlight_before"))
+ before = str2l(value);
+
+ else if (!strcmp(name, "points") || !strcmp(name, "max_points"))
+ points = str2ul(value);
+
+ else if (!strcmp(name, "timeout"))
+ timeout_ms = str2l(value);
+
+ else if((api_version == 1 && !strcmp(name, "group")) || (api_version >= 2 && !strcmp(name, "time_group")))
+ time_group_method = time_grouping_parse(value, RRDR_GROUPING_AVERAGE);
+
+ else if((api_version == 1 && !strcmp(name, "group_options")) || (api_version >= 2 && !strcmp(name, "time_group_options")))
+ time_group_options = value;
+
+ else if(!strcmp(name, "options"))
+ options |= web_client_api_request_v1_data_options(value);
+
+ else if(!strcmp(name, "method"))
+ method = weights_string_to_method(value);
+
+ else if(api_version == 1 && (!strcmp(name, "context") || !strcmp(name, "contexts")))
+ scope_contexts = value;
+
+ else if(api_version >= 2 && !strcmp(name, "scope_nodes")) scope_nodes = value;
+ else if(api_version >= 2 && !strcmp(name, "scope_contexts")) scope_contexts = value;
+ else if(api_version >= 2 && !strcmp(name, "nodes")) nodes = value;
+ else if(api_version >= 2 && !strcmp(name, "contexts")) contexts = value;
+ else if(api_version >= 2 && !strcmp(name, "instances")) instances = value;
+ else if(api_version >= 2 && !strcmp(name, "dimensions")) dimensions = value;
+ else if(api_version >= 2 && !strcmp(name, "labels")) labels = value;
+ else if(api_version >= 2 && !strcmp(name, "alerts")) alerts = value;
+ else if(api_version >= 2 && (!strcmp(name, "group_by") || !strcmp(name, "group_by[0]"))) {
+ group_by.group_by = group_by_parse(value);
+ }
+ else if(api_version >= 2 && (!strcmp(name, "group_by_label") || !strcmp(name, "group_by_label[0]"))) {
+ group_by.group_by_label = value;
+ }
+ else if(api_version >= 2 && (!strcmp(name, "aggregation") || !strcmp(name, "aggregation[0]"))) {
+ group_by.aggregation = group_by_aggregate_function_parse(value);
+ }
+
+ else if(!strcmp(name, "tier")) {
+ tier = str2ul(value);
+ if(tier < storage_tiers)
+ options |= RRDR_OPTION_SELECTED_TIER;
+ else
+ tier = 0;
+ }
+ }
+
+ if(options == 0)
+ // the user did not set any options
+ options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO | RRDR_OPTION_NONZERO;
+ else
+ // the user set some options, add also these
+ options |= RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO;
+
+ if(options & RRDR_OPTION_PERCENTAGE)
+ options |= RRDR_OPTION_ABSOLUTE;
+
+ if(options & RRDR_OPTION_DEBUG)
+ options &= ~RRDR_OPTION_MINIFY;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->content_type = CT_APPLICATION_JSON;
+
+ QUERY_WEIGHTS_REQUEST qwr = {
+ .version = api_version,
+ .host = (api_version == 1) ? NULL : host,
+ .scope_nodes = scope_nodes,
+ .scope_contexts = scope_contexts,
+ .nodes = nodes,
+ .contexts = contexts,
+ .instances = instances,
+ .dimensions = dimensions,
+ .labels = labels,
+ .alerts = alerts,
+ .group_by = {
+ .group_by = group_by.group_by,
+ .group_by_label = group_by.group_by_label,
+ .aggregation = group_by.aggregation,
+ },
+ .method = method,
+ .format = format,
+ .time_group_method = time_group_method,
+ .time_group_options = time_group_options,
+ .baseline_after = baseline_after,
+ .baseline_before = baseline_before,
+ .after = after,
+ .before = before,
+ .points = points,
+ .options = options,
+ .tier = tier,
+ .timeout_ms = timeout_ms,
+
+ .interrupt_callback = web_client_interrupt_callback,
+ .interrupt_callback_data = w,
+ };
+
+ return web_api_v12_weights(wb, &qwr);
+}
+
+bool web_client_interrupt_callback(void *data) {
+ struct web_client *w = data;
+
+ if(w->interrupt.callback)
+ return w->interrupt.callback(w, w->interrupt.callback_data);
+
+ return sock_has_output_error(w->ofd);
+}
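
web_client_interrupt_callback() gives long-running queries a cheap way to notice that the client has gone away. A minimal sketch of how a worker loop polls such a callback between units of work (the loop body is hypothetical; the callback type matches weights_interrupt_callback_t above):

    static bool run_query_steps(weights_interrupt_callback_t cb, void *cb_data, size_t steps) {
        for(size_t i = 0; i < steps; i++) {
            if(cb && cb(cb_data))
                return false; // client disconnected - stop wasting cycles

            // ... do one unit of query work here ...
        }
        return true;
    }
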
diff --git a/web/api/web_api.h b/web/api/web_api.h
new file mode 100644
index 000000000..0ca91841f
--- /dev/null
+++ b/web/api/web_api.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_API_H
+#define NETDATA_WEB_API_H 1
+
+#include "daemon/common.h"
+#include "web/api/badges/web_buffer_svg.h"
+#include "web/api/formatters/rrd2json.h"
+#include "web/api/health/health_cmdapi.h"
+#include "web/api/queries/weights.h"
+
+struct web_api_command {
+ const char *command;
+ uint32_t hash;
+ WEB_CLIENT_ACL acl;
+ int (*callback)(RRDHOST *host, struct web_client *w, char *url);
+};
+
+struct web_client;
+
+int web_client_api_request_vX(RRDHOST *host, struct web_client *w, char *url_path_endpoint, struct web_api_command *api_commands);
+
+static inline void fix_google_param(char *s) {
+ if(unlikely(!s || !*s)) return;
+
+ for( ; *s ;s++) {
+ if(!isalnum(*s) && *s != '.' && *s != '_' && *s != '-')
+ *s = '_';
+ }
+}
+
+int web_client_api_request_weights(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format, size_t api_version);
+
+bool web_client_interrupt_callback(void *data);
+
+#include "web_api_v1.h"
+#include "web_api_v2.h"
+
+#endif //NETDATA_WEB_API_H
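
fix_google_param() sanitizes Google Visualization callback names before they are echoed back: everything outside [A-Za-z0-9._-] becomes an underscore. A standalone illustration of the same logic:

    #include <ctype.h>
    #include <stdio.h>

    static void fix_param(char *s) { // same logic as fix_google_param() above
        if(!s || !*s) return;
        for( ; *s ; s++)
            if(!isalnum((unsigned char)*s) && *s != '.' && *s != '_' && *s != '-')
                *s = '_';
    }

    int main(void) {
        char param[] = "alert('x');responseHandler";
        fix_param(param);
        printf("%s\n", param); // alert__x___responseHandler
        return 0;
    }
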
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
index 1b38a33b1..6e23549d4 100644
--- a/web/api/web_api_v1.c
+++ b/web/api/web_api_v1.c
@@ -41,7 +41,12 @@ static struct {
, {"natural-points" , 0 , RRDR_OPTION_NATURAL_POINTS}
, {"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS}
, {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS}
- , {"plan" , 0 , RRDR_OPTION_SHOW_PLAN}
+ , {"details" , 0 , RRDR_OPTION_SHOW_DETAILS}
+ , {"debug" , 0 , RRDR_OPTION_DEBUG}
+ , {"plan" , 0 , RRDR_OPTION_DEBUG}
+ , {"minify" , 0 , RRDR_OPTION_MINIFY}
+ , {"group-by-labels" , 0 , RRDR_OPTION_GROUP_BY_LABELS}
+ , {"label-quotes" , 0 , RRDR_OPTION_LABEL_QUOTES}
, {NULL , 0 , 0}
};
@@ -53,6 +58,7 @@ static struct {
{ DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON}
, {DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP}
, {DATASOURCE_FORMAT_JSON , 0 , DATASOURCE_JSON}
+ , {DATASOURCE_FORMAT_JSON2 , 0 , DATASOURCE_JSON2}
, {DATASOURCE_FORMAT_JSONP , 0 , DATASOURCE_JSONP}
, {DATASOURCE_FORMAT_SSV , 0 , DATASOURCE_SSV}
, {DATASOURCE_FORMAT_CSV , 0 , DATASOURCE_CSV}
@@ -63,7 +69,9 @@ static struct {
, {DATASOURCE_FORMAT_SSV_COMMA , 0 , DATASOURCE_SSV_COMMA}
, {DATASOURCE_FORMAT_CSV_JSON_ARRAY , 0 , DATASOURCE_CSV_JSON_ARRAY}
, {DATASOURCE_FORMAT_CSV_MARKDOWN , 0 , DATASOURCE_CSV_MARKDOWN}
- , { NULL, 0, 0}
+
+ // terminator
+ , {NULL, 0, 0}
};
static struct {
@@ -92,7 +100,7 @@ void web_client_api_v1_init(void) {
for(i = 0; api_v1_data_google_formats[i].name ; i++)
api_v1_data_google_formats[i].hash = simple_hash(api_v1_data_google_formats[i].name);
- web_client_api_v1_init_grouping();
+ time_grouping_init();
uuid_t uuid;
@@ -170,7 +178,7 @@ inline RRDR_OPTIONS web_client_api_request_v1_data_options(char *o) {
RRDR_OPTIONS ret = 0x00000000;
char *tok;
- while(o && *o && (tok = mystrsep(&o, ", |"))) {
+ while(o && *o && (tok = strsep_skip_consecutive_separators(&o, ", |"))) {
if(!*tok) continue;
uint32_t hash = simple_hash(tok);
@@ -186,20 +194,20 @@ inline RRDR_OPTIONS web_client_api_request_v1_data_options(char *o) {
return ret;
}
-void web_client_api_request_v1_data_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) {
+void web_client_api_request_v1_data_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options) {
+ buffer_json_member_add_array(wb, key);
+
RRDR_OPTIONS used = 0; // to prevent adding duplicates
- int added = 0;
for(int i = 0; api_v1_data_options[i].name ; i++) {
if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
const char *name = api_v1_data_options[i].name;
used |= api_v1_data_options[i].value;
- if(added) buffer_strcat(wb, ",");
- buffer_strcat(wb, name);
-
- added++;
+ buffer_json_add_array_item_string(wb, name);
}
}
+
+ buffer_json_array_close(wb);
}
void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) {
@@ -254,11 +262,11 @@ inline uint32_t web_client_api_request_v1_data_google_format(char *name) {
int web_client_api_request_v1_alarms_select (char *url) {
int all = 0;
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if (!value || !*value) continue;
- if(!strcmp(value, "all")) all = 1;
- else if(!strcmp(value, "active")) all = 0;
+ if(!strcmp(value, "all") || !strcmp(value, "all=true")) all = 1;
+ else if(!strcmp(value, "active") || !strcmp(value, "active=true")) all = 0;
}
return all;
@@ -268,7 +276,7 @@ inline int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w,
int all = web_client_api_request_v1_alarms_select(url);
buffer_flush(w->response.data);
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
health_alarms2json(host, w->response.data, all);
buffer_no_cacheable(w->response.data);
return HTTP_RESP_OK;
@@ -278,7 +286,7 @@ inline int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_cli
int all = web_client_api_request_v1_alarms_select(url);
buffer_flush(w->response.data);
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
health_alarms_values2json(host, w->response.data, all);
buffer_no_cacheable(w->response.data);
return HTTP_RESP_OK;
@@ -292,10 +300,10 @@ inline int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_clien
buffer_sprintf(w->response.data, "[");
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if(!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -321,7 +329,7 @@ inline int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_clien
health_aggregate_alarms(host, w->response.data, contexts, status);
buffer_sprintf(w->response.data, "]\n");
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
buffer_no_cacheable(w->response.data);
buffer_free(contexts);
@@ -333,10 +341,10 @@ inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client
char *chart = NULL;
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if (!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -345,7 +353,7 @@ inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client
}
buffer_flush(w->response.data);
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
health_alarm_log2json(host, w->response.data, after, chart);
return HTTP_RESP_OK;
}
@@ -357,10 +365,10 @@ inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client
buffer_flush(w->response.data);
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if(!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -388,7 +396,7 @@ inline int web_client_api_request_single_chart(RRDHOST *host, struct web_client
goto cleanup;
}
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
st->last_accessed_time_s = now_realtime_sec();
callback(st, w->response.data);
return HTTP_RESP_OK;
@@ -401,38 +409,6 @@ inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_c
return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json);
}
-static RRDCONTEXT_TO_JSON_OPTIONS rrdcontext_to_json_parse_options(char *o) {
- RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
- char *tok;
-
- while(o && *o && (tok = mystrsep(&o, ", |"))) {
- if(!*tok) continue;
-
- if(!strcmp(tok, "full") || !strcmp(tok, "all"))
- options |= RRDCONTEXT_OPTIONS_ALL;
- else if(!strcmp(tok, "charts") || !strcmp(tok, "instances"))
- options |= RRDCONTEXT_OPTION_SHOW_INSTANCES;
- else if(!strcmp(tok, "dimensions") || !strcmp(tok, "metrics"))
- options |= RRDCONTEXT_OPTION_SHOW_METRICS;
- else if(!strcmp(tok, "queue"))
- options |= RRDCONTEXT_OPTION_SHOW_QUEUED;
- else if(!strcmp(tok, "flags"))
- options |= RRDCONTEXT_OPTION_SHOW_FLAGS;
- else if(!strcmp(tok, "uuids"))
- options |= RRDCONTEXT_OPTION_SHOW_UUIDS;
- else if(!strcmp(tok, "deleted"))
- options |= RRDCONTEXT_OPTION_SHOW_DELETED;
- else if(!strcmp(tok, "labels"))
- options |= RRDCONTEXT_OPTION_SHOW_LABELS;
- else if(!strcmp(tok, "deepscan"))
- options |= RRDCONTEXT_OPTION_DEEPSCAN;
- else if(!strcmp(tok, "hidden"))
- options |= RRDCONTEXT_OPTION_SHOW_HIDDEN;
- }
-
- return options;
-}
-
static int web_client_api_request_v1_context(RRDHOST *host, struct web_client *w, char *url) {
char *context = NULL;
RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
@@ -443,10 +419,10 @@ static int web_client_api_request_v1_context(RRDHOST *host, struct web_client *w
buffer_flush(w->response.data);
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if(!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -476,17 +452,19 @@ static int web_client_api_request_v1_context(RRDHOST *host, struct web_client *w
SIMPLE_PATTERN *chart_dimensions_pattern = NULL;
if(chart_label_key)
- chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true);
if(chart_labels_filter)
- chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT,
+ true);
if(dimensions) {
- chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v",
+ SIMPLE_PATTERN_EXACT, true);
buffer_free(dimensions);
}
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
int ret = rrdcontext_to_json(host, w->response.data, after, before, options, context, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern);
simple_pattern_free(chart_label_key_pattern);
@@ -505,10 +483,10 @@ static int web_client_api_request_v1_contexts(RRDHOST *host, struct web_client *
buffer_flush(w->response.data);
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if(!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -532,17 +510,19 @@ static int web_client_api_request_v1_contexts(RRDHOST *host, struct web_client *
SIMPLE_PATTERN *chart_dimensions_pattern = NULL;
if(chart_label_key)
- chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT, true);
if(chart_labels_filter)
- chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT,
+ true);
if(dimensions) {
- chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v",
+ SIMPLE_PATTERN_EXACT, true);
buffer_free(dimensions);
}
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
int ret = rrdcontexts_to_json(host, w->response.data, after, before, options, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern);
simple_pattern_free(chart_label_key_pattern);
@@ -556,7 +536,7 @@ inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w,
(void)url;
buffer_flush(w->response.data);
- w->response.data->contenttype = CT_APPLICATION_JSON;
+ w->response.data->content_type = CT_APPLICATION_JSON;
charts2json(host, w->response.data, 0, 0);
return HTTP_RESP_OK;
}
@@ -565,18 +545,8 @@ inline int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w,
return web_client_api_request_single_chart(host, w, url, rrd_stats_api_v1_chart);
}
-void fix_google_param(char *s) {
- if(unlikely(!s)) return;
-
- for( ; *s ;s++) {
- if(!isalnum(*s) && *s != '.' && *s != '_' && *s != '-')
- *s = '_';
- }
-}
-
-
// returns the HTTP code
-inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) {
+static inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) {
debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url);
int ret = HTTP_RESP_BAD_REQUEST;
@@ -604,15 +574,15 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
char *chart_labels_filter = NULL;
char *group_options = NULL;
size_t tier = 0;
- RRDR_GROUPING group = RRDR_GROUPING_AVERAGE;
+ RRDR_TIME_GROUPING group = RRDR_GROUPING_AVERAGE;
DATASOURCE_FORMAT format = DATASOURCE_JSON;
RRDR_OPTIONS options = 0;
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if(!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if(!name || !*name) continue;
if(!value || !*value) continue;
@@ -638,7 +608,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
else if(!strcmp(name, "gtime")) group_time_str = value;
else if(!strcmp(name, "group_options")) group_options = value;
else if(!strcmp(name, "group")) {
- group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+ group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE);
}
else if(!strcmp(name, "format")) {
format = web_client_api_request_v1_data_format(value);
@@ -658,10 +628,10 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
char *tqx_name, *tqx_value;
while(value) {
- tqx_value = mystrsep(&value, ";");
+ tqx_value = strsep_skip_consecutive_separators(&value, ";");
if(!tqx_value || !*tqx_value) continue;
- tqx_name = mystrsep(&tqx_value, ":");
+ tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":");
if(!tqx_name || !*tqx_name) continue;
if(!tqx_value || !*tqx_value) continue;
@@ -722,26 +692,29 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0;
QUERY_TARGET_REQUEST qtr = {
+ .version = 1,
.after = after,
.before = before,
.host = host,
.st = st,
- .hosts = NULL,
+ .nodes = NULL,
.contexts = context,
- .charts = chart,
+ .instances = chart,
.dimensions = (dimensions)?buffer_tostring(dimensions):NULL,
- .timeout = timeout,
+ .timeout_ms = timeout,
.points = points,
.format = format,
.options = options,
- .group_method = group,
- .group_options = group_options,
+ .time_group_method = group,
+ .time_group_options = group_options,
.resampling_time = group_time,
.tier = tier,
.chart_label_key = chart_label_key,
- .charts_labels_filter = chart_labels_filter,
+ .labels = chart_labels_filter,
.query_source = QUERY_SOURCE_API_DATA,
.priority = STORAGE_PRIORITY_NORMAL,
+ .interrupt_callback = web_client_interrupt_callback,
+ .interrupt_callback_data = w,
};
qt = query_target_create(&qtr);
@@ -751,22 +724,12 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
goto cleanup;
}
- if (timeout) {
- struct timeval now;
- now_realtime_timeval(&now);
- int inqueue = (int)dt_usec(&w->tv_in, &now) / 1000;
- timeout -= inqueue;
- if (timeout <= 0) {
- buffer_flush(w->response.data);
- buffer_strcat(w->response.data, "Query timeout exceeded");
- ret = HTTP_RESP_BACKEND_FETCH_FAILED;
- goto cleanup;
- }
+ web_client_timeout_checkpoint_set(w, timeout);
+ if(web_client_timeout_checkpoint_and_check(w, NULL)) {
+ ret = w->response.code;
+ goto cleanup;
}
- debug(D_WEB_CLIENT, "%llu: API command 'data' for chart '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%u', format '%u', options '0x%08x'"
- , w->id, chart, (dimensions)?buffer_tostring(dimensions):"", after, before , points, group, format, options);
-
if(outFileName && *outFileName) {
buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName);
debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName);
@@ -814,10 +777,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
buffer_strcat(w->response.data, ");");
cleanup:
- if(qt && qt->used) {
- internal_error(true, "QUERY_TARGET: left non-released on query '%s'", qt->id);
- query_target_release(qt);
- }
+ query_target_release(qt);
onewayalloc_destroy(owa);
buffer_free(dimensions);
return ret;
@@ -886,10 +846,10 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *
buffer_no_cacheable(w->response.data);
while(url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if (!value || !*value) continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if (!name || !*name) continue;
if (!value || !*value) continue;
@@ -1013,8 +973,10 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *
}
}
-static inline void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb) {
- int alarm_normal = 0, alarm_warn = 0, alarm_crit = 0;
+void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key) {
+ buffer_json_member_add_object(wb, key);
+
+ size_t normal = 0, warning = 0, critical = 0;
RRDCALC *rc;
foreach_rrdcalc_in_rrdhost_read(host, rc) {
if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
@@ -1022,274 +984,217 @@ static inline void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST
switch(rc->status) {
case RRDCALC_STATUS_WARNING:
- alarm_warn++;
+ warning++;
break;
case RRDCALC_STATUS_CRITICAL:
- alarm_crit++;
+ critical++;
break;
default:
- alarm_normal++;
+ normal++;
}
}
foreach_rrdcalc_in_rrdhost_done(rc);
- buffer_sprintf(wb, "\t\t\"normal\": %d,\n", alarm_normal);
- buffer_sprintf(wb, "\t\t\"warning\": %d,\n", alarm_warn);
- buffer_sprintf(wb, "\t\t\"critical\": %d\n", alarm_crit);
+
+ buffer_json_member_add_uint64(wb, "normal", normal);
+ buffer_json_member_add_uint64(wb, "warning", warning);
+ buffer_json_member_add_uint64(wb, "critical", critical);
+
+ buffer_json_object_close(wb);
+}
+
+static inline void web_client_api_request_v1_info_mirrored_hosts_status(BUFFER *wb, RRDHOST *host) {
+ buffer_json_add_array_item_object(wb);
+
+ buffer_json_member_add_string(wb, "hostname", rrdhost_hostname(host));
+ buffer_json_member_add_uint64(wb, "hops", host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1);
+ buffer_json_member_add_boolean(wb, "reachable", (host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN)));
+
+ buffer_json_member_add_string(wb, "guid", host->machine_guid);
+ buffer_json_member_add_uuid(wb, "node_id", host->node_id);
+ rrdhost_aclk_state_lock(host);
+ buffer_json_member_add_string(wb, "claim_id", host->aclk_state.claimed_id);
+ rrdhost_aclk_state_unlock(host);
+
+ buffer_json_object_close(wb);
}
static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
RRDHOST *host;
- int count = 0;
- buffer_strcat(wb, "\t\"mirrored_hosts\": [\n");
rrd_rdlock();
- rrdhost_foreach_read(host) {
- if (count > 0)
- buffer_strcat(wb, ",\n");
- buffer_sprintf(wb, "\t\t\"%s\"", rrdhost_hostname(host));
- count++;
- }
-
- buffer_strcat(wb, "\n\t],\n\t\"mirrored_hosts_status\": [\n");
- count = 0;
+ buffer_json_member_add_array(wb, "mirrored_hosts");
rrdhost_foreach_read(host)
- {
- if (count > 0)
- buffer_strcat(wb, ",\n");
+ buffer_json_add_array_item_string(wb, rrdhost_hostname(host));
+ buffer_json_array_close(wb);
- buffer_sprintf(
- wb, "\t\t{ \"guid\": \"%s\", \"hostname\": \"%s\", \"reachable\": %s, \"hops\": %d"
- , host->machine_guid
- , rrdhost_hostname(host)
- , (host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN)) ? "true" : "false"
- , host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1
- );
-
- rrdhost_aclk_state_lock(host);
- if (host->aclk_state.claimed_id)
- buffer_sprintf(wb, ", \"claim_id\": \"%s\"", host->aclk_state.claimed_id);
- else
- buffer_strcat(wb, ", \"claim_id\": null");
- rrdhost_aclk_state_unlock(host);
-
- if (host->node_id) {
- char node_id_str[GUID_LEN + 1];
- uuid_unparse_lower(*host->node_id, node_id_str);
- buffer_sprintf(wb, ", \"node_id\": \"%s\" }", node_id_str);
- } else
- buffer_strcat(wb, ", \"node_id\": null }");
-
- count++;
+ buffer_json_member_add_array(wb, "mirrored_hosts_status");
+ rrdhost_foreach_read(host) {
+ if ((host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) {
+ web_client_api_request_v1_info_mirrored_hosts_status(wb, host);
+ }
}
+ rrdhost_foreach_read(host) {
+ if ((host != localhost && rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN))) {
+ web_client_api_request_v1_info_mirrored_hosts_status(wb, host);
+ }
+ }
+ buffer_json_array_close(wb);
+
rrd_unlock();
+}
- buffer_strcat(wb, "\n\t],\n");
+void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key) {
+ buffer_json_member_add_object(wb, key);
+ rrdlabels_to_buffer_json_members(host->rrdlabels, wb);
+ buffer_json_object_close(wb);
}
-inline void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation) {
- char tabs[11];
+static void host_collectors(RRDHOST *host, BUFFER *wb) {
+ buffer_json_member_add_array(wb, "collectors");
+
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ RRDSET *st;
+ char name[500];
- if (indentation > 10)
- indentation = 10;
+ time_t now = now_realtime_sec();
- tabs[0] = '\0';
- while (indentation) {
- strcat(tabs, "\t");
- indentation--;
+ rrdset_foreach_read(st, host) {
+ if (!rrdset_is_available_for_viewers(st))
+ continue;
+
+        snprintfz(name, sizeof(name) - 1, "%s:%s", rrdset_plugin_name(st), rrdset_module_name(st));
+
+ bool old = 0;
+ bool *set = dictionary_set(dict, name, &old, sizeof(bool));
+ if(!*set) {
+ *set = true;
+ st->last_accessed_time_s = now;
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "plugin", rrdset_plugin_name(st));
+ buffer_json_member_add_string(wb, "module", rrdset_module_name(st));
+ buffer_json_object_close(wb);
+ }
}
+ rrdset_foreach_done(st);
+ dictionary_destroy(dict);
- rrdlabels_to_buffer(host->rrdlabels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
- buffer_strcat(wb, "\n");
+ buffer_json_array_close(wb);
}
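
host_collectors() deduplicates plugin:module pairs through a dictionary: with DICT_OPTION_DONT_OVERWRITE_VALUE, dictionary_set() hands back the previously stored value for a repeated key, so the if(!*set) branch runs only the first time a pair is seen. The same first-seen idea in portable C, with a plain linear scan standing in for the dictionary:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        const char *pairs[] = { "proc:stat", "proc:meminfo", "proc:stat", "cgroups:cpu" };
        const char *seen[8];
        size_t nseen = 0;

        for(size_t i = 0; i < sizeof(pairs) / sizeof(pairs[0]); i++) {
            int first = 1;
            for(size_t j = 0; j < nseen; j++)
                if(!strcmp(seen[j], pairs[i])) { first = 0; break; }

            if(first && nseen < sizeof(seen) / sizeof(seen[0])) {
                seen[nseen++] = pairs[i];
                printf("emit collector: %s\n", pairs[i]); // once per unique pair
            }
        }
        return 0;
    }
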
extern int aclk_connected;
-inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
-{
- buffer_strcat(wb, "{\n");
- buffer_sprintf(wb, "\t\"version\": \"%s\",\n", rrdhost_program_version(host));
- buffer_sprintf(wb, "\t\"uid\": \"%s\",\n", host->machine_guid);
+inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, false);
+ buffer_json_member_add_string(wb, "version", rrdhost_program_version(host));
+ buffer_json_member_add_string(wb, "uid", host->machine_guid);
+
+ buffer_json_member_add_uint64(wb, "hosts-available", rrdhost_hosts_available());
web_client_api_request_v1_info_mirrored_hosts(wb);
- buffer_strcat(wb, "\t\"alarms\": {\n");
- web_client_api_request_v1_info_summary_alarm_statuses(host, wb);
- buffer_strcat(wb, "\t},\n");
-
- buffer_sprintf(wb, "\t\"os_name\": \"%s\",\n", (host->system_info->host_os_name) ? host->system_info->host_os_name : "");
- buffer_sprintf(wb, "\t\"os_id\": \"%s\",\n", (host->system_info->host_os_id) ? host->system_info->host_os_id : "");
- buffer_sprintf(wb, "\t\"os_id_like\": \"%s\",\n", (host->system_info->host_os_id_like) ? host->system_info->host_os_id_like : "");
- buffer_sprintf(wb, "\t\"os_version\": \"%s\",\n", (host->system_info->host_os_version) ? host->system_info->host_os_version : "");
- buffer_sprintf(wb, "\t\"os_version_id\": \"%s\",\n", (host->system_info->host_os_version_id) ? host->system_info->host_os_version_id : "");
- buffer_sprintf(wb, "\t\"os_detection\": \"%s\",\n", (host->system_info->host_os_detection) ? host->system_info->host_os_detection : "");
- buffer_sprintf(wb, "\t\"cores_total\": \"%s\",\n", (host->system_info->host_cores) ? host->system_info->host_cores : "");
- buffer_sprintf(wb, "\t\"total_disk_space\": \"%s\",\n", (host->system_info->host_disk_space) ? host->system_info->host_disk_space : "");
- buffer_sprintf(wb, "\t\"cpu_freq\": \"%s\",\n", (host->system_info->host_cpu_freq) ? host->system_info->host_cpu_freq : "");
- buffer_sprintf(wb, "\t\"ram_total\": \"%s\",\n", (host->system_info->host_ram_total) ? host->system_info->host_ram_total : "");
-
- if (host->system_info->container_os_name)
- buffer_sprintf(wb, "\t\"container_os_name\": \"%s\",\n", host->system_info->container_os_name);
- if (host->system_info->container_os_id)
- buffer_sprintf(wb, "\t\"container_os_id\": \"%s\",\n", host->system_info->container_os_id);
- if (host->system_info->container_os_id_like)
- buffer_sprintf(wb, "\t\"container_os_id_like\": \"%s\",\n", host->system_info->container_os_id_like);
- if (host->system_info->container_os_version)
- buffer_sprintf(wb, "\t\"container_os_version\": \"%s\",\n", host->system_info->container_os_version);
- if (host->system_info->container_os_version_id)
- buffer_sprintf(wb, "\t\"container_os_version_id\": \"%s\",\n", host->system_info->container_os_version_id);
- if (host->system_info->container_os_detection)
- buffer_sprintf(wb, "\t\"container_os_detection\": \"%s\",\n", host->system_info->container_os_detection);
- if (host->system_info->is_k8s_node)
- buffer_sprintf(wb, "\t\"is_k8s_node\": \"%s\",\n", host->system_info->is_k8s_node);
-
- buffer_sprintf(wb, "\t\"kernel_name\": \"%s\",\n", (host->system_info->kernel_name) ? host->system_info->kernel_name : "");
- buffer_sprintf(wb, "\t\"kernel_version\": \"%s\",\n", (host->system_info->kernel_version) ? host->system_info->kernel_version : "");
- buffer_sprintf(wb, "\t\"architecture\": \"%s\",\n", (host->system_info->architecture) ? host->system_info->architecture : "");
- buffer_sprintf(wb, "\t\"virtualization\": \"%s\",\n", (host->system_info->virtualization) ? host->system_info->virtualization : "");
- buffer_sprintf(wb, "\t\"virt_detection\": \"%s\",\n", (host->system_info->virt_detection) ? host->system_info->virt_detection : "");
- buffer_sprintf(wb, "\t\"container\": \"%s\",\n", (host->system_info->container) ? host->system_info->container : "");
- buffer_sprintf(wb, "\t\"container_detection\": \"%s\",\n", (host->system_info->container_detection) ? host->system_info->container_detection : "");
-
- if (host->system_info->cloud_provider_type)
- buffer_sprintf(wb, "\t\"cloud_provider_type\": \"%s\",\n", host->system_info->cloud_provider_type);
- if (host->system_info->cloud_instance_type)
- buffer_sprintf(wb, "\t\"cloud_instance_type\": \"%s\",\n", host->system_info->cloud_instance_type);
- if (host->system_info->cloud_instance_region)
- buffer_sprintf(wb, "\t\"cloud_instance_region\": \"%s\",\n", host->system_info->cloud_instance_region);
-
- buffer_strcat(wb, "\t\"host_labels\": {\n");
- host_labels2json(host, wb, 2);
- buffer_strcat(wb, "\t},\n");
-
- buffer_strcat(wb, "\t\"functions\": {\n");
- host_functions2json(host, wb, 2, "\"", "\"");
- buffer_strcat(wb, "\t},\n");
-
- buffer_strcat(wb, "\t\"collectors\": [");
- chartcollectors2json(host, wb);
- buffer_strcat(wb, "\n\t],\n");
+ web_client_api_request_v1_info_summary_alarm_statuses(host, wb, "alarms");
+
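+ // host system information: the *_or_empty helpers always emit the key (with an
+ // empty string when unset), while *_or_omit skips the key entirely when unset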
+ buffer_json_member_add_string_or_empty(wb, "os_name", host->system_info->host_os_name);
+ buffer_json_member_add_string_or_empty(wb, "os_id", host->system_info->host_os_id);
+ buffer_json_member_add_string_or_empty(wb, "os_id_like", host->system_info->host_os_id_like);
+ buffer_json_member_add_string_or_empty(wb, "os_version", host->system_info->host_os_version);
+ buffer_json_member_add_string_or_empty(wb, "os_version_id", host->system_info->host_os_version_id);
+ buffer_json_member_add_string_or_empty(wb, "os_detection", host->system_info->host_os_detection);
+ buffer_json_member_add_string_or_empty(wb, "cores_total", host->system_info->host_cores);
+ buffer_json_member_add_string_or_empty(wb, "total_disk_space", host->system_info->host_disk_space);
+ buffer_json_member_add_string_or_empty(wb, "cpu_freq", host->system_info->host_cpu_freq);
+ buffer_json_member_add_string_or_empty(wb, "ram_total", host->system_info->host_ram_total);
+
+ buffer_json_member_add_string_or_omit(wb, "container_os_name", host->system_info->container_os_name);
+ buffer_json_member_add_string_or_omit(wb, "container_os_id", host->system_info->container_os_id);
+ buffer_json_member_add_string_or_omit(wb, "container_os_id_like", host->system_info->container_os_id_like);
+ buffer_json_member_add_string_or_omit(wb, "container_os_version", host->system_info->container_os_version);
+ buffer_json_member_add_string_or_omit(wb, "container_os_version_id", host->system_info->container_os_version_id);
+ buffer_json_member_add_string_or_omit(wb, "container_os_detection", host->system_info->container_os_detection);
+ buffer_json_member_add_string_or_omit(wb, "is_k8s_node", host->system_info->is_k8s_node);
+
+ buffer_json_member_add_string_or_empty(wb, "kernel_name", host->system_info->kernel_name);
+ buffer_json_member_add_string_or_empty(wb, "kernel_version", host->system_info->kernel_version);
+ buffer_json_member_add_string_or_empty(wb, "architecture", host->system_info->architecture);
+ buffer_json_member_add_string_or_empty(wb, "virtualization", host->system_info->virtualization);
+ buffer_json_member_add_string_or_empty(wb, "virt_detection", host->system_info->virt_detection);
+ buffer_json_member_add_string_or_empty(wb, "container", host->system_info->container);
+ buffer_json_member_add_string_or_empty(wb, "container_detection", host->system_info->container_detection);
+
+ buffer_json_member_add_string_or_omit(wb, "cloud_provider_type", host->system_info->cloud_provider_type);
+ buffer_json_member_add_string_or_omit(wb, "cloud_instance_type", host->system_info->cloud_instance_type);
+ buffer_json_member_add_string_or_omit(wb, "cloud_instance_region", host->system_info->cloud_instance_region);
+
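+ // labels, functions and collectors are emitted as their own JSON sub-sections
+ // by the helpers below, using the buffer JSON API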
+ host_labels2json(host, wb, "host_labels");
+ host_functions2json(host, wb);
+ host_collectors(host, wb);
#ifdef DISABLE_CLOUD
- buffer_strcat(wb, "\t\"cloud-enabled\": false,\n");
+ buffer_json_member_add_boolean(wb, "cloud-enabled", false);
#else
- buffer_sprintf(wb, "\t\"cloud-enabled\": %s,\n",
- appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", 1) ? "true" : "false");
+ buffer_json_member_add_boolean(wb, "cloud-enabled",
+ appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", true));
#endif
#ifdef ENABLE_ACLK
- buffer_strcat(wb, "\t\"cloud-available\": true,\n");
+ buffer_json_member_add_boolean(wb, "cloud-available", true);
#else
- buffer_strcat(wb, "\t\"cloud-available\": false,\n");
+ buffer_json_member_add_boolean(wb, "cloud-available", false);
#endif
+
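+ // the agent is claimed when get_agent_claimid() returns an id;
+ // freez() is a no-op on NULL, so the id can be released unconditionally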
char *agent_id = get_agent_claimid();
- if (agent_id == NULL)
- buffer_strcat(wb, "\t\"agent-claimed\": false,\n");
- else {
- buffer_strcat(wb, "\t\"agent-claimed\": true,\n");
- freez(agent_id);
- }
+ buffer_json_member_add_boolean(wb, "agent-claimed", agent_id != NULL);
+ freez(agent_id);
+
#ifdef ENABLE_ACLK
- if (aclk_connected) {
- buffer_strcat(wb, "\t\"aclk-available\": true,\n");
- }
- else
+ buffer_json_member_add_boolean(wb, "aclk-available", aclk_connected);
+#else
+ buffer_json_member_add_boolean(wb, "aclk-available", false);
#endif
- buffer_strcat(wb, "\t\"aclk-available\": false,\n"); // Intentionally valid with/without #ifdef above
-
- buffer_strcat(wb, "\t\"memory-mode\": ");
- analytics_get_data(analytics_data.netdata_config_memory_mode, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"multidb-disk-quota\": ");
- analytics_get_data(analytics_data.netdata_config_multidb_disk_quota, wb);
- buffer_strcat(wb, ",\n");
- buffer_strcat(wb, "\t\"page-cache-size\": ");
- analytics_get_data(analytics_data.netdata_config_page_cache_size, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"stream-enabled\": ");
- analytics_get_data(analytics_data.netdata_config_stream_enabled, wb);
- buffer_strcat(wb, ",\n");
+ buffer_json_member_add_string(wb, "memory-mode", rrd_memory_mode_name(host->rrd_memory_mode));
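+ // the dbengine sizing members are compile-time conditional and are
+ // omitted entirely when dbengine support is not built in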
+#ifdef ENABLE_DBENGINE
+ buffer_json_member_add_uint64(wb, "multidb-disk-quota", default_multidb_disk_quota_mb);
+ buffer_json_member_add_uint64(wb, "page-cache-size", default_rrdeng_page_cache_mb);
+#endif // ENABLE_DBENGINE
+ buffer_json_member_add_boolean(wb, "web-enabled", web_server_mode != WEB_SERVER_MODE_NONE);
+ buffer_json_member_add_boolean(wb, "stream-enabled", default_rrdpush_enabled);
#ifdef ENABLE_COMPRESSION
- if(host->sender){
- buffer_strcat(wb, "\t\"stream-compression\": ");
- buffer_strcat(wb, stream_has_capability(host->sender, STREAM_CAP_COMPRESSION) ? "true" : "false");
- buffer_strcat(wb, ",\n");
- }else{
- buffer_strcat(wb, "\t\"stream-compression\": null,\n");
- }
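+ // report false both when compression is not negotiated and when there is
+ // no sender yet (the removed code emitted null for the latter case)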
+ buffer_json_member_add_boolean(wb, "stream-compression",
+ host->sender && stream_has_capability(host->sender, STREAM_CAP_COMPRESSION));
#else
- buffer_strcat(wb, "\t\"stream-compression\": null,\n");
-#endif //ENABLE_COMPRESSION
-
- buffer_strcat(wb, "\t\"hosts-available\": ");
- analytics_get_data(analytics_data.netdata_config_hosts_available, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"https-enabled\": ");
- analytics_get_data(analytics_data.netdata_config_https_enabled, wb);
- buffer_strcat(wb, ",\n");
+ buffer_json_member_add_boolean(wb, "stream-compression", false);
+#endif //ENABLE_COMPRESSION
- buffer_strcat(wb, "\t\"buildinfo\": ");
- analytics_get_data(analytics_data.netdata_buildinfo, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"release-channel\": ");
- analytics_get_data(analytics_data.netdata_config_release_channel, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"web-enabled\": ");
- analytics_get_data(analytics_data.netdata_config_web_enabled, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"notification-methods\": ");
- analytics_get_data(analytics_data.netdata_notification_methods, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"exporting-enabled\": ");
- analytics_get_data(analytics_data.netdata_config_exporting_enabled, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"exporting-connectors\": ");
- analytics_get_data(analytics_data.netdata_exporting_connectors, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"allmetrics-prometheus-used\": ");
- analytics_get_data(analytics_data.netdata_allmetrics_prometheus_used, wb);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\"allmetrics-shell-used\": ");
- analytics_get_data(analytics_data.netdata_allmetrics_shell_used, wb);
- buffer_strcat(wb, ",\n");
+#ifdef ENABLE_HTTPS
+ buffer_json_member_add_boolean(wb, "https-enabled", true);
+#else
+ buffer_json_member_add_boolean(wb, "https-enabled", false);
+#endif
- buffer_strcat(wb, "\t\"allmetrics-json-used\": ");
- analytics_get_data(analytics_data.netdata_allmetrics_json_used, wb);
- buffer_strcat(wb, ",\n");
+ buffer_json_member_add_quoted_string(wb, "buildinfo", analytics_data.netdata_buildinfo);
+ buffer_json_member_add_quoted_string(wb, "release-channel", analytics_data.netdata_config_release_channel);
+ buffer_json_member_add_quoted_string(wb, "notification-methods", analytics_data.netdata_notification_methods);
- buffer_strcat(wb, "\t\"dashboard-used\": ");
- analytics_get_data(analytics_data.netdata_dashboard_used, wb);
- buffer_strcat(wb, ",\n");
+ buffer_json_member_add_boolean(wb, "exporting-enabled", analytics_data.exporting_enabled);
+ buffer_json_member_add_quoted_string(wb, "exporting-connectors", analytics_data.netdata_exporting_connectors);
- buffer_strcat(wb, "\t\"charts-count\": ");
- analytics_get_data(analytics_data.netdata_charts_count, wb);
- buffer_strcat(wb, ",\n");
+ buffer_json_member_add_uint64(wb, "allmetrics-prometheus-used", analytics_data.prometheus_hits);
+ buffer_json_member_add_uint64(wb, "allmetrics-shell-used", analytics_data.shell_hits);
+ buffer_json_member_add_uint64(wb, "allmetrics-json-used", analytics_data.json_hits);
+ buffer_json_member_add_uint64(wb, "dashboard-used", analytics_data.dashboard_hits);
- buffer_strcat(wb, "\t\"metrics-count\": ");
- analytics_get_data(analytics_data.netdata_metrics_count, wb);
+ buffer_json_member_add_uint64(wb, "charts-count", analytics_data.charts_count);
+ buffer_json_member_add_uint64(wb, "metrics-count", analytics_data.metrics_count);
#if defined(ENABLE_ML)
- buffer_strcat(wb, ",\n");
- char *ml_info = ml_get_host_info(host);
-
- buffer_strcat(wb, "\t\"ml-info\": ");
- buffer_strcat(wb, ml_info);
-
- freez(ml_info);
+ buffer_json_member_add_object(wb, "ml-info");
+ ml_host_get_info(host, wb);
+ buffer_json_object_close(wb);
#endif
- buffer_strcat(wb, "\n}");
+ buffer_json_finalize(wb);
return 0;
}
@@ -1300,17 +1205,16 @@ int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char
if (!netdata_ready)
return HTTP_RESP_BACKEND_FETCH_FAILED;
- char *s = ml_get_host_runtime_info(host);
- if (!s)
- s = strdupz("{\"error\": \"json string is empty\" }\n");
-
BUFFER *wb = w->response.data;
buffer_flush(wb);
- wb->contenttype = CT_APPLICATION_JSON;
- buffer_strcat(wb, s);
+ wb->content_type = CT_APPLICATION_JSON;
+
+ buffer_json_initialize(wb, "\"", "\"", 0, true, false);
+ ml_host_get_detection_info(host, wb);
+ buffer_json_finalize(wb);
+
buffer_no_cacheable(wb);
- freez(s);
return HTTP_RESP_OK;
}
@@ -1320,27 +1224,22 @@ int web_client_api_request_v1_ml_models(RRDHOST *host, struct web_client *w, cha
if (!netdata_ready)
return HTTP_RESP_BACKEND_FETCH_FAILED;
- char *s = ml_get_host_models(host);
- if (!s)
- s = strdupz("{\"error\": \"json string is empty\" }\n");
-
BUFFER *wb = w->response.data;
buffer_flush(wb);
- wb->contenttype = CT_APPLICATION_JSON;
- buffer_strcat(wb, s);
+ wb->content_type = CT_APPLICATION_JSON;
+ ml_host_get_models(host, wb);
buffer_no_cacheable(wb);
- freez(s);
return HTTP_RESP_OK;
}
-#endif
+#endif // ENABLE_ML
inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) {
(void)url;
if (!netdata_ready) return HTTP_RESP_BACKEND_FETCH_FAILED;
BUFFER *wb = w->response.data;
buffer_flush(wb);
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
web_client_api_request_v1_info_fill_buffer(host, wb);
@@ -1360,94 +1259,19 @@ static int web_client_api_request_v1_aclk_state(RRDHOST *host, struct web_client
buffer_strcat(wb, str);
freez(str);
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
buffer_no_cacheable(wb);
return HTTP_RESP_OK;
}
-static int web_client_api_request_v1_weights_internal(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format) {
- if (!netdata_ready)
- return HTTP_RESP_BACKEND_FETCH_FAILED;
-
- long long baseline_after = 0, baseline_before = 0, after = 0, before = 0, points = 0;
- RRDR_OPTIONS options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NONZERO | RRDR_OPTION_NULL2ZERO;
- int options_count = 0;
- RRDR_GROUPING group = RRDR_GROUPING_AVERAGE;
- int timeout = 0;
- size_t tier = 0;
- const char *group_options = NULL, *contexts_str = NULL;
-
- while (url) {
- char *value = mystrsep(&url, "&");
- if (!value || !*value)
- continue;
-
- char *name = mystrsep(&value, "=");
- if (!name || !*name)
- continue;
- if (!value || !*value)
- continue;
-
- if (!strcmp(name, "baseline_after"))
- baseline_after = (long long) strtoul(value, NULL, 0);
-
- else if (!strcmp(name, "baseline_before"))
- baseline_before = (long long) strtoul(value, NULL, 0);
-
- else if (!strcmp(name, "after") || !strcmp(name, "highlight_after"))
- after = (long long) strtoul(value, NULL, 0);
-
- else if (!strcmp(name, "before") || !strcmp(name, "highlight_before"))
- before = (long long) strtoul(value, NULL, 0);
-
- else if (!strcmp(name, "points") || !strcmp(name, "max_points"))
- points = (long long) strtoul(value, NULL, 0);
-
- else if (!strcmp(name, "timeout"))
- timeout = (int) strtoul(value, NULL, 0);
-
- else if(!strcmp(name, "group"))
- group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
-
- else if(!strcmp(name, "options")) {
- if(!options_count) options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO;
- options |= web_client_api_request_v1_data_options(value);
- options_count++;
- }
-
- else if(!strcmp(name, "method"))
- method = weights_string_to_method(value);
-
- else if(!strcmp(name, "context") || !strcmp(name, "contexts"))
- contexts_str = value;
-
- else if(!strcmp(name, "tier")) {
- tier = str2ul(value);
- if(tier < storage_tiers)
- options |= RRDR_OPTION_SELECTED_TIER;
- else
- tier = 0;
- }
- }
-
- BUFFER *wb = w->response.data;
- buffer_flush(wb);
- wb->contenttype = CT_APPLICATION_JSON;
-
- SIMPLE_PATTERN *contexts = (contexts_str) ? simple_pattern_create(contexts_str, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT) : NULL;
-
- int ret = web_api_v1_weights(host, wb, method, format, group, group_options, baseline_after, baseline_before, after, before, points, options, contexts, tier, timeout);
-
- simple_pattern_free(contexts);
- return ret;
-}
-
int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) {
- return web_client_api_request_v1_weights_internal(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS);
+ return web_client_api_request_weights(host, w, url, default_metric_correlations_method,
+ WEIGHTS_FORMAT_CHARTS, 1);
}
int web_client_api_request_v1_weights(RRDHOST *host, struct web_client *w, char *url) {
- return web_client_api_request_v1_weights_internal(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS);
+ return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE,
+ WEIGHTS_FORMAT_CONTEXTS, 1);
}
int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char *url) {
@@ -1458,11 +1282,11 @@ int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char
const char *function = NULL;
while (url) {
- char *value = mystrsep(&url, "&");
+ char *value = strsep_skip_consecutive_separators(&url, "&");
if (!value || !*value)
continue;
- char *name = mystrsep(&value, "=");
+ char *name = strsep_skip_consecutive_separators(&value, "=");
if (!name || !*name)
continue;
@@ -1475,7 +1299,7 @@ int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char
BUFFER *wb = w->response.data;
buffer_flush(wb);
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
buffer_no_cacheable(wb);
return rrd_call_function_and_wait(host, wb, timeout, function);
@@ -1487,12 +1311,12 @@ int web_client_api_request_v1_functions(RRDHOST *host, struct web_client *w, cha
BUFFER *wb = w->response.data;
buffer_flush(wb);
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
buffer_no_cacheable(wb);
- buffer_strcat(wb, "{\n");
- host_functions2json(host, wb, 1, "\"", "\"");
- buffer_strcat(wb, "}");
+ buffer_json_initialize(wb, "\"", "\"", 0, true, false);
+ host_functions2json(host, wb);
+ buffer_json_finalize(wb);
return HTTP_RESP_OK;
}
@@ -1576,7 +1400,7 @@ int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struc
return HTTP_RESP_NOT_FOUND;
}
- wb->contenttype = CT_APPLICATION_JSON;
+ wb->content_type = CT_APPLICATION_JSON;
buffer_no_cacheable(wb);
buffer_strcat(wb, "{");
for(size_t tier = 0; tier < storage_tiers ;tier++) {
@@ -1596,85 +1420,55 @@ int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struc
#define ACL_DEV_OPEN_ACCESS 0
#endif
-static struct api_command {
- const char *command;
- uint32_t hash;
- WEB_CLIENT_ACL acl;
- int (*callback)(RRDHOST *host, struct web_client *w, char *url);
-} api_commands[] = {
- { "info", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_info },
- { "data", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_data },
- { "chart", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_chart },
- { "charts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_charts },
- { "context", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_context },
- { "contexts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_contexts },
+static struct web_api_command api_commands_v1[] = {
+ { "info", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_info },
+ { "data", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_data },
+ { "chart", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_chart },
+ { "charts", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_charts },
+ { "context", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_context },
+ { "contexts", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_contexts },
// registry checks the ACL by itself, so we allow everything
{ "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry },
// badges can be fetched with both dashboard and badge permissions
- { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_badge },
+ { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC | WEB_CLIENT_ACL_BADGE, web_client_api_request_v1_badge },
- { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarms },
- { "alarms_values", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarms_values },
- { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_log },
- { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_variables },
- { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_count },
- { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_allmetrics },
+ { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarms },
+ { "alarms_values", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarms_values },
+ { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarm_log },
+ { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarm_variables },
+ { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_alarm_count },
+ { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_allmetrics },
#if defined(ENABLE_ML)
- { "ml_info", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_ml_info },
- { "ml_models", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_ml_models },
+ { "ml_info", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_ml_info },
+ { "ml_models", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_ml_models },
#endif
{ "manage/health", 0, WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_mgmt_health },
- { "aclk", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_aclk_state },
- { "metric_correlations", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_metric_correlations },
- { "weights", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_weights },
+ { "aclk", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_aclk_state },
+ { "metric_correlations", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_metric_correlations },
+ { "weights", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_weights },
{ "function", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_function },
{ "functions", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_functions },
- { "dbengine_stats", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_dbengine_stats },
+ { "dbengine_stats", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v1_dbengine_stats },
// terminator
{ NULL, 0, WEB_CLIENT_ACL_NONE, NULL },
};
-inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url) {
+inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url_path_endpoint) {
static int initialized = 0;
- int i;
if(unlikely(initialized == 0)) {
initialized = 1;
- for(i = 0; api_commands[i].command ; i++)
- api_commands[i].hash = simple_hash(api_commands[i].command);
+ for(int i = 0; api_commands_v1[i].command ; i++)
+ api_commands_v1[i].hash = simple_hash(api_commands_v1[i].command);
}
- // get the command
- if(url) {
- debug(D_WEB_CLIENT, "%llu: Searching for API v1 command '%s'.", w->id, url);
- uint32_t hash = simple_hash(url);
-
- for(i = 0; api_commands[i].command ;i++) {
- if(unlikely(hash == api_commands[i].hash && !strcmp(url, api_commands[i].command))) {
- if(unlikely(api_commands[i].acl != WEB_CLIENT_ACL_NOCHECK) && !(w->acl & api_commands[i].acl))
- return web_client_permission_denied(w);
-
- //return api_commands[i].callback(host, w, url);
- return api_commands[i].callback(host, w, (w->decoded_query_string + 1));
- }
- }
-
- buffer_flush(w->response.data);
- buffer_strcat(w->response.data, "Unsupported v1 API command: ");
- buffer_strcat_htmlescape(w->response.data, url);
- return HTTP_RESP_NOT_FOUND;
- }
- else {
- buffer_flush(w->response.data);
- buffer_sprintf(w->response.data, "Which API v1 command?");
- return HTTP_RESP_BAD_REQUEST;
- }
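+ // command lookup, ACL checking and dispatch are now shared with the v2 API
+ // through the common web_client_api_request_vX() dispatcher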
+ return web_client_api_request_vX(host, w, url_path_endpoint, api_commands_v1);
}
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
index 9dd6a1c23..6fa8de017 100644
--- a/web/api/web_api_v1.h
+++ b/web/api/web_api_v1.h
@@ -3,14 +3,12 @@
#ifndef NETDATA_WEB_API_V1_H
#define NETDATA_WEB_API_V1_H 1
-#include "daemon/common.h"
-#include "web/api/badges/web_buffer_svg.h"
-#include "web/api/formatters/rrd2json.h"
-#include "web/api/health/health_cmdapi.h"
-#include "web/api/queries/weights.h"
+#include "web_api.h"
+
+struct web_client;
RRDR_OPTIONS web_client_api_request_v1_data_options(char *o);
-void web_client_api_request_v1_data_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options);
+void web_client_api_request_v1_data_options_to_buffer_json_array(BUFFER *wb, const char *key, RRDR_OPTIONS options);
void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options);
uint32_t web_client_api_request_v1_data_format(char *name);
@@ -24,16 +22,17 @@ int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *
int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url);
int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url);
-int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url_path_endpoint);
int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb);
-void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation);
void web_client_api_v1_init(void);
void web_client_api_v1_management_init(void);
+void host_labels2json(RRDHOST *host, BUFFER *wb, const char *key);
+void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb, const char *key);
+
extern char *api_secret;
#endif //NETDATA_WEB_API_V1_H
diff --git a/web/api/web_api_v2.c b/web/api/web_api_v2.c
new file mode 100644
index 000000000..7280c0427
--- /dev/null
+++ b/web/api/web_api_v2.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "web_api_v2.h"
+#include "../rtc/webrtc.h"
+
+static int web_client_api_request_v2_contexts_internal(RRDHOST *host __maybe_unused, struct web_client *w, char *url, CONTEXTS_V2_OPTIONS options) {
+ struct api_v2_contexts_request req = { 0 };
+
+ while(url) {
+ char *value = strsep_skip_consecutive_separators(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = strsep_skip_consecutive_separators(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value now hold the parameter's name and value;
+ // both are guaranteed to be non-NULL and non-empty
+
+ if(!strcmp(name, "scope_nodes")) req.scope_nodes = value;
+ else if((options & (CONTEXTS_V2_NODES | CONTEXTS_V2_CONTEXTS)) && !strcmp(name, "nodes")) req.nodes = value;
+ else if((options & CONTEXTS_V2_CONTEXTS) && !strcmp(name, "scope_contexts")) req.scope_contexts = value;
+ else if((options & CONTEXTS_V2_CONTEXTS) && !strcmp(name, "contexts")) req.contexts = value;
+ else if((options & CONTEXTS_V2_SEARCH) && !strcmp(name, "q")) req.q = value;
+ else if(!strcmp(name, "timeout")) req.timeout_ms = str2l(value);
+ }
+
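+ // debug output is currently enabled unconditionally for v2 context responses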
+ options |= CONTEXTS_V2_DEBUG;
+
+ buffer_flush(w->response.data);
+ buffer_no_cacheable(w->response.data);
+ return rrdcontext_to_json_v2(w->response.data, &req, options);
+}
+
+static int web_client_api_request_v2_q(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+ return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_SEARCH | CONTEXTS_V2_CONTEXTS | CONTEXTS_V2_NODES);
+}
+
+static int web_client_api_request_v2_contexts(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+ return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_CONTEXTS);
+}
+
+static int web_client_api_request_v2_nodes(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+ return web_client_api_request_v2_contexts_internal(host, w, url, CONTEXTS_V2_NODES | CONTEXTS_V2_NODES_DETAILED);
+}
+
+static int web_client_api_request_v2_weights(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+ return web_client_api_request_weights(host, w, url, WEIGHTS_METHOD_VALUE,
+ WEIGHTS_FORMAT_MULTINODE, 2);
+}
+
+#define GROUP_BY_KEY_MAX_LENGTH 30
+static struct {
+ char group_by[GROUP_BY_KEY_MAX_LENGTH + 1];
+ char aggregation[GROUP_BY_KEY_MAX_LENGTH + 1];
+ char group_by_label[GROUP_BY_KEY_MAX_LENGTH + 1];
+} group_by_keys[MAX_QUERY_GROUP_BY_PASSES];
+
+__attribute__((constructor)) void initialize_group_by_keys(void) {
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ snprintfz(group_by_keys[g].group_by, GROUP_BY_KEY_MAX_LENGTH, "group_by[%zu]", g);
+ snprintfz(group_by_keys[g].aggregation, GROUP_BY_KEY_MAX_LENGTH, "aggregation[%zu]", g);
+ snprintfz(group_by_keys[g].group_by_label, GROUP_BY_KEY_MAX_LENGTH, "group_by_label[%zu]", g);
+ }
+}
+
+static int web_client_api_request_v2_data(RRDHOST *host __maybe_unused, struct web_client *w, char *url) {
+ usec_t received_ut = now_monotonic_usec();
+
+ int ret = HTTP_RESP_BAD_REQUEST;
+
+ buffer_flush(w->response.data);
+
+ char *google_version = "0.6",
+ *google_reqId = "0",
+ *google_sig = "0",
+ *google_out = "json",
+ *responseHandler = NULL,
+ *outFileName = NULL;
+
+ time_t last_timestamp_in_data = 0, google_timestamp = 0;
+
+ char *scope_nodes = NULL;
+ char *scope_contexts = NULL;
+ char *nodes = NULL;
+ char *contexts = NULL;
+ char *instances = NULL;
+ char *dimensions = NULL;
+ char *before_str = NULL;
+ char *after_str = NULL;
+ char *resampling_time_str = NULL;
+ char *points_str = NULL;
+ char *timeout_str = NULL;
+ char *labels = NULL;
+ char *alerts = NULL;
+ char *time_group_options = NULL;
+ char *tier_str = NULL;
+ size_t tier = 0;
+ RRDR_TIME_GROUPING time_group = RRDR_GROUPING_AVERAGE;
+ DATASOURCE_FORMAT format = DATASOURCE_JSON2;
+ RRDR_OPTIONS options = RRDR_OPTION_VIRTUAL_POINTS | RRDR_OPTION_JSON_WRAP | RRDR_OPTION_RETURN_JWAR;
+
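+ // default: a single group-by pass over dimensions, aggregated by average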
+ struct group_by_pass group_by[MAX_QUERY_GROUP_BY_PASSES] = {
+ {
+ .group_by = RRDR_GROUP_BY_DIMENSION,
+ .group_by_label = NULL,
+ .aggregation = RRDR_GROUP_BY_FUNCTION_AVERAGE,
+ },
+ };
+
+ size_t group_by_idx = 0, group_by_label_idx = 0, aggregation_idx = 0;
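+ // bare "group_by", "group_by_label" and "aggregation" parameters fill successive
+ // passes in the order given; indexed keys like "group_by[1]" address a pass directly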
+
+ while(url) {
+ char *value = strsep_skip_consecutive_separators(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = strsep_skip_consecutive_separators(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value now hold the parameter's name and value;
+ // both are guaranteed to be non-NULL and non-empty
+
+ if(!strcmp(name, "scope_nodes")) scope_nodes = value;
+ else if(!strcmp(name, "scope_contexts")) scope_contexts = value;
+ else if(!strcmp(name, "nodes")) nodes = value;
+ else if(!strcmp(name, "contexts")) contexts = value;
+ else if(!strcmp(name, "instances")) instances = value;
+ else if(!strcmp(name, "dimensions")) dimensions = value;
+ else if(!strcmp(name, "labels")) labels = value;
+ else if(!strcmp(name, "alerts")) alerts = value;
+ else if(!strcmp(name, "after")) after_str = value;
+ else if(!strcmp(name, "before")) before_str = value;
+ else if(!strcmp(name, "points")) points_str = value;
+ else if(!strcmp(name, "timeout")) timeout_str = value;
+ else if(!strcmp(name, "group_by")) {
+ group_by[group_by_idx++].group_by = group_by_parse(value);
+ if(group_by_idx >= MAX_QUERY_GROUP_BY_PASSES)
+ group_by_idx = MAX_QUERY_GROUP_BY_PASSES - 1;
+ }
+ else if(!strcmp(name, "group_by_label")) {
+ group_by[group_by_label_idx++].group_by_label = value;
+ if(group_by_label_idx >= MAX_QUERY_GROUP_BY_PASSES)
+ group_by_label_idx = MAX_QUERY_GROUP_BY_PASSES - 1;
+ }
+ else if(!strcmp(name, "aggregation")) {
+ group_by[aggregation_idx++].aggregation = group_by_aggregate_function_parse(value);
+ if(aggregation_idx >= MAX_QUERY_GROUP_BY_PASSES)
+ aggregation_idx = MAX_QUERY_GROUP_BY_PASSES - 1;
+ }
+ else if(!strcmp(name, "format")) format = web_client_api_request_v1_data_format(value);
+ else if(!strcmp(name, "options")) options |= web_client_api_request_v1_data_options(value);
+ else if(!strcmp(name, "time_group")) time_group = time_grouping_parse(value, RRDR_GROUPING_AVERAGE);
+ else if(!strcmp(name, "time_group_options")) time_group_options = value;
+ else if(!strcmp(name, "time_resampling")) resampling_time_str = value;
+ else if(!strcmp(name, "tier")) tier_str = value;
+ else if(!strcmp(name, "callback")) responseHandler = value;
+ else if(!strcmp(name, "filename")) outFileName = value;
+ else if(!strcmp(name, "tqx")) {
+ // parse Google Visualization API options
+ // https://developers.google.com/chart/interactive/docs/dev/implementing_data_source
+ char *tqx_name, *tqx_value;
+
+ while(value) {
+ tqx_value = strsep_skip_consecutive_separators(&value, ";");
+ if(!tqx_value || !*tqx_value) continue;
+
+ tqx_name = strsep_skip_consecutive_separators(&tqx_value, ":");
+ if(!tqx_name || !*tqx_name) continue;
+ if(!tqx_value || !*tqx_value) continue;
+
+ if(!strcmp(tqx_name, "version"))
+ google_version = tqx_value;
+ else if(!strcmp(tqx_name, "reqId"))
+ google_reqId = tqx_value;
+ else if(!strcmp(tqx_name, "sig")) {
+ google_sig = tqx_value;
+ google_timestamp = strtoul(google_sig, NULL, 0);
+ }
+ else if(!strcmp(tqx_name, "out")) {
+ google_out = tqx_value;
+ format = web_client_api_request_v1_data_google_format(google_out);
+ }
+ else if(!strcmp(tqx_name, "responseHandler"))
+ responseHandler = tqx_value;
+ else if(!strcmp(tqx_name, "outFileName"))
+ outFileName = tqx_value;
+ }
+ }
+ else {
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if(!strcmp(name, group_by_keys[g].group_by))
+ group_by[g].group_by = group_by_parse(value);
+ else if(!strcmp(name, group_by_keys[g].group_by_label))
+ group_by[g].group_by_label = value;
+ else if(!strcmp(name, group_by_keys[g].aggregation))
+ group_by[g].aggregation = group_by_aggregate_function_parse(value);
+ }
+ }
+ }
+
+ // validate the Google Visualization API parameters we received
+ fix_google_param(google_out);
+ fix_google_param(google_sig);
+ fix_google_param(google_reqId);
+ fix_google_param(google_version);
+ fix_google_param(responseHandler);
+ fix_google_param(outFileName);
+
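+ // a per-pass group_by_label selector implies grouping by label for that pass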
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if (group_by[g].group_by_label && *group_by[g].group_by_label)
+ group_by[g].group_by |= RRDR_GROUP_BY_LABEL;
+ }
+
+ if(group_by[0].group_by == RRDR_GROUP_BY_NONE)
+ group_by[0].group_by = RRDR_GROUP_BY_DIMENSION;
+
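+ // grouping beyond plain dimensions, or percentage output, requires absolute
+ // values, so the option is forced on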
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if ((group_by[g].group_by & ~(RRDR_GROUP_BY_DIMENSION)) || (options & RRDR_OPTION_PERCENTAGE)) {
+ options |= RRDR_OPTION_ABSOLUTE;
+ break;
+ }
+ }
+
+ if(options & RRDR_OPTION_DEBUG)
+ options &= ~RRDR_OPTION_MINIFY;
+
+ if(tier_str && *tier_str) {
+ tier = str2ul(tier_str);
+ if(tier < storage_tiers)
+ options |= RRDR_OPTION_SELECTED_TIER;
+ else
+ tier = 0;
+ }
+
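+ // relative timestamps are allowed: a negative "after" counts seconds back
+ // from now (the default -600 selects the last 10 minutes)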
+ time_t before = (before_str && *before_str)?str2l(before_str):0;
+ time_t after = (after_str && *after_str) ?str2l(after_str):-600;
+ size_t points = (points_str && *points_str)?str2u(points_str):0;
+ int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0;
+ time_t resampling_time = (resampling_time_str && *resampling_time_str) ? str2l(resampling_time_str) : 0;
+
+ QUERY_TARGET_REQUEST qtr = {
+ .version = 2,
+ .scope_nodes = scope_nodes,
+ .scope_contexts = scope_contexts,
+ .after = after,
+ .before = before,
+ .host = NULL,
+ .st = NULL,
+ .nodes = nodes,
+ .contexts = contexts,
+ .instances = instances,
+ .dimensions = dimensions,
+ .alerts = alerts,
+ .timeout_ms = timeout,
+ .points = points,
+ .format = format,
+ .options = options,
+ .time_group_method = time_group,
+ .time_group_options = time_group_options,
+ .resampling_time = resampling_time,
+ .tier = tier,
+ .chart_label_key = NULL,
+ .labels = labels,
+ .query_source = QUERY_SOURCE_API_DATA,
+ .priority = STORAGE_PRIORITY_NORMAL,
+ .received_ut = received_ut,
+
+ .interrupt_callback = web_client_interrupt_callback,
+ .interrupt_callback_data = w,
+ };
+
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++)
+ qtr.group_by[g] = group_by[g];
+
+ QUERY_TARGET *qt = query_target_create(&qtr);
+ ONEWAYALLOC *owa = NULL;
+
+ if(!qt) {
+ buffer_sprintf(w->response.data, "Failed to prepare the query.");
+ ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
+ goto cleanup;
+ }
+
+ web_client_timeout_checkpoint_set(w, timeout);
+ if(web_client_timeout_checkpoint_and_check(w, NULL)) {
+ ret = w->response.code;
+ goto cleanup;
+ }
+
+ if(outFileName && *outFileName) {
+ buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName);
+ debug(D_WEB_CLIENT, "%llu: generating outfilename header: '%s'", w->id, outFileName);
+ }
+
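+ // Google DataTable responses are wrapped in the caller-supplied response
+ // handler, echoing back the protocol version and request id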
+ if(format == DATASOURCE_DATATABLE_JSONP) {
+ if(responseHandler == NULL)
+ responseHandler = "google.visualization.Query.setResponse";
+
+ debug(D_WEB_CLIENT_ACCESS, "%llu: GOOGLE JSON/JSONP: version = '%s', reqId = '%s', sig = '%s', out = '%s', responseHandler = '%s', outFileName = '%s'",
+ w->id, google_version, google_reqId, google_sig, google_out, responseHandler, outFileName
+ );
+
+ buffer_sprintf(
+ w->response.data,
+ "%s({version:'%s',reqId:'%s',status:'ok',sig:'%"PRId64"',table:",
+ responseHandler,
+ google_version,
+ google_reqId,
+ (int64_t)now_realtime_sec());
+ }
+ else if(format == DATASOURCE_JSONP) {
+ if(responseHandler == NULL)
+ responseHandler = "callback";
+
+ buffer_strcat(w->response.data, responseHandler);
+ buffer_strcat(w->response.data, "(");
+ }
+
+ owa = onewayalloc_create(0);
+ ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data);
+
+ if(format == DATASOURCE_DATATABLE_JSONP) {
+ if(google_timestamp < last_timestamp_in_data)
+ buffer_strcat(w->response.data, "});");
+
+ else {
+ // the client already has the latest data
+ buffer_flush(w->response.data);
+ buffer_sprintf(w->response.data,
+ "%s({version:'%s',reqId:'%s',status:'error',errors:[{reason:'not_modified',message:'Data not modified'}]});",
+ responseHandler, google_version, google_reqId);
+ }
+ }
+ else if(format == DATASOURCE_JSONP)
+ buffer_strcat(w->response.data, ");");
+
+cleanup:
+ query_target_release(qt);
+ onewayalloc_destroy(owa);
+ return ret;
+}
+
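+// the POST payload is expected to carry the client's WebRTC offer;
+// webrtc_new_connection() writes the corresponding answer to the response buffer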
+static int web_client_api_request_v2_webrtc(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) {
+ return webrtc_new_connection(w->post_payload, w->response.data);
+}
+
+static struct web_api_command api_commands_v2[] = {
+ {"data", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_data},
+ {"nodes", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_nodes},
+ {"contexts", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_contexts},
+ {"weights", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_weights},
+ {"q", 0, WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC, web_client_api_request_v2_q},
+
+ {"rtc_offer", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v2_webrtc},
+
+ // terminator
+ {NULL, 0, WEB_CLIENT_ACL_NONE, NULL},
+};
+
+inline int web_client_api_request_v2(RRDHOST *host, struct web_client *w, char *url_path_endpoint) {
+ static int initialized = 0;
+
+ if(unlikely(initialized == 0)) {
+ initialized = 1;
+
+ for(int i = 0; api_commands_v2[i].command ; i++)
+ api_commands_v2[i].hash = simple_hash(api_commands_v2[i].command);
+ }
+
+ return web_client_api_request_vX(host, w, url_path_endpoint, api_commands_v2);
+}
diff --git a/web/api/web_api_v2.h b/web/api/web_api_v2.h
new file mode 100644
index 000000000..4a1893bd8
--- /dev/null
+++ b/web/api/web_api_v2.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WEB_API_V2_H
+#define NETDATA_WEB_API_V2_H 1
+
+#include "web_api.h"
+
+struct web_client;
+
+int web_client_api_request_v2(RRDHOST *host, struct web_client *w, char *url_path_endpoint);
+
+#endif //NETDATA_WEB_API_V2_H