Diffstat (limited to 'web/api')
-rw-r--r--  web/api/README.md                                  |    6
-rw-r--r--  web/api/badges/web_buffer_svg.c                    |   51
-rw-r--r--  web/api/badges/web_buffer_svg.h                    |    6
-rw-r--r--  web/api/exporters/shell/allmetrics_shell.c         |   20
-rw-r--r--  web/api/formatters/csv/csv.c                       |   10
-rw-r--r--  web/api/formatters/json/json.c                     |   21
-rw-r--r--  web/api/formatters/json_wrapper.c                  |  102
-rw-r--r--  web/api/formatters/json_wrapper.h                  |    5
-rw-r--r--  web/api/formatters/rrd2json.c                      |  104
-rw-r--r--  web/api/formatters/rrd2json.h                      |   14
-rw-r--r--  web/api/formatters/rrdset2json.c                   |   24
-rw-r--r--  web/api/formatters/ssv/ssv.c                       |    2
-rw-r--r--  web/api/formatters/value/value.c                   |   25
-rw-r--r--  web/api/formatters/value/value.h                   |    2
-rw-r--r--  web/api/netdata-swagger.json                       | 1072
-rw-r--r--  web/api/netdata-swagger.yaml                       |  912
-rw-r--r--  web/api/queries/Makefile.am                        |    3
-rw-r--r--  web/api/queries/average/average.c                  |   14
-rw-r--r--  web/api/queries/average/average.h                  |    6
-rw-r--r--  web/api/queries/countif/Makefile.am                |    8
-rw-r--r--  web/api/queries/countif/README.md                  |   36
-rw-r--r--  web/api/queries/countif/countif.c                  |  136
-rw-r--r--  web/api/queries/countif/countif.h                  |   15
-rw-r--r--  web/api/queries/des/des.c                          |   36
-rw-r--r--  web/api/queries/des/des.h                          |    6
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.c  |   16
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.h  |    6
-rw-r--r--  web/api/queries/max/max.c                          |   16
-rw-r--r--  web/api/queries/max/max.h                          |    6
-rw-r--r--  web/api/queries/median/README.md                   |   17
-rw-r--r--  web/api/queries/median/median.c                    |  111
-rw-r--r--  web/api/queries/median/median.h                    |   14
-rw-r--r--  web/api/queries/min/min.c                          |   16
-rw-r--r--  web/api/queries/min/min.h                          |    6
-rw-r--r--  web/api/queries/percentile/Makefile.am             |    8
-rw-r--r--  web/api/queries/percentile/README.md               |   58
-rw-r--r--  web/api/queries/percentile/percentile.c            |  169
-rw-r--r--  web/api/queries/percentile/percentile.h            |   23
-rw-r--r--  web/api/queries/query.c                            | 2395
-rw-r--r--  web/api/queries/query.h                            |   35
-rw-r--r--  web/api/queries/rrdr.c                             |   90
-rw-r--r--  web/api/queries/rrdr.h                             |   92
-rw-r--r--  web/api/queries/ses/ses.c                          |   28
-rw-r--r--  web/api/queries/ses/ses.h                          |    6
-rw-r--r--  web/api/queries/stddev/stddev.c                    |   42
-rw-r--r--  web/api/queries/stddev/stddev.h                    |   12
-rw-r--r--  web/api/queries/sum/sum.c                          |   14
-rw-r--r--  web/api/queries/sum/sum.h                          |    6
-rw-r--r--  web/api/queries/trimmed_mean/Makefile.am           |    8
-rw-r--r--  web/api/queries/trimmed_mean/README.md             |   56
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.c        |  166
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.h        |   22
-rw-r--r--  web/api/queries/weights.c                          | 1220
-rw-r--r--  web/api/queries/weights.h                          |   33
-rw-r--r--  web/api/web_api_v1.c                               |  468
-rw-r--r--  web/api/web_api_v1.h                               |    5
56 files changed, 6313 insertions(+), 1487 deletions(-)
diff --git a/web/api/README.md b/web/api/README.md
index 89a953002..fc520a09a 100644
--- a/web/api/README.md
+++ b/web/api/README.md
@@ -7,8 +7,6 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/README.m
## Netdata REST API
-The complete documentation of the Netdata API is available at the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
-
-If your prefer it over the Swagger Editor, you can also use **[Swagger UI](https://registry.my-netdata.io/swagger/#!/default/get_data)**. This however does not provide all the information available.
-
+The complete documentation of the Netdata API is available as a Swagger API document [in our GitHub repository](https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml). You can view it online using the **[Swagger Editor](https://editor.swagger.io/?url=https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml)**.
+If you prefer it over the Swagger Editor, you can also use [Swagger UI](https://github.com/swagger-api/swagger-ui) by pointing it at `web/api/netdata-swagger.yaml` in the Netdata source tree (or at https://raw.githubusercontent.com/netdata/netdata/master/web/api/netdata-swagger.yaml if you want to use the Swagger API definitions directly from our GitHub repository). This, however, does not provide all the information available.
diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c
index 65ca21d19..00b4ad650 100644
--- a/web/api/badges/web_buffer_svg.c
+++ b/web/api/badges/web_buffer_svg.c
@@ -249,7 +249,8 @@ cleanup:
return len - i;
}
-static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
+static inline char *format_value_with_precision_and_unit(char *value_string, size_t value_string_len,
+ NETDATA_DOUBLE value, const char *units, int precision) {
if(unlikely(isnan(value) || isinf(value)))
value = 0.0;
@@ -260,23 +261,23 @@ static inline char *format_value_with_precision_and_unit(char *value_string, siz
if(precision < 0) {
int len, lstop = 0, trim_zeros = 1;
- calculated_number abs = value;
+ NETDATA_DOUBLE abs = value;
if(isless(value, 0)) {
lstop = 1;
- abs = calculated_number_fabs(value);
+ abs = fabsndd(value);
}
if(isgreaterequal(abs, 1000)) {
- len = snprintfz(value_string, value_string_len, "%0.0" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ len = snprintfz(value_string, value_string_len, "%0.0" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
trim_zeros = 0;
}
- else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
- else len = snprintfz(value_string, value_string_len, "%0.7" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE) value);
+ else if(isgreaterequal(abs, 10)) len = snprintfz(value_string, value_string_len, "%0.1" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 1)) len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.1)) len = snprintfz(value_string, value_string_len, "%0.2" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.01)) len = snprintfz(value_string, value_string_len, "%0.4" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.001)) len = snprintfz(value_string, value_string_len, "%0.5" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else if(isgreaterequal(abs, 0.0001)) len = snprintfz(value_string, value_string_len, "%0.6" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
+ else len = snprintfz(value_string, value_string_len, "%0.7" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE) value);
if(unlikely(trim_zeros)) {
int l;
@@ -303,7 +304,7 @@ static inline char *format_value_with_precision_and_unit(char *value_string, siz
}
else {
if(precision > 50) precision = 50;
- snprintfz(value_string, value_string_len, "%0.*" LONG_DOUBLE_MODIFIER "%s%s", precision, (LONG_DOUBLE) value, separator, units);
+ snprintfz(value_string, value_string_len, "%0.*" NETDATA_DOUBLE_MODIFIER "%s%s", precision, (NETDATA_DOUBLE) value, separator, units);
}
return value_string;
@@ -359,7 +360,8 @@ static struct units_formatter {
{ NULL, 0, UNITS_FORMAT_NONE }
};
-inline char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision) {
+inline char *format_value_and_unit(char *value_string, size_t value_string_len,
+ NETDATA_DOUBLE value, const char *units, int precision) {
static int max = -1;
int i;
@@ -555,7 +557,7 @@ typedef enum color_comparison {
COLOR_COMPARE_GREATEREQUAL,
} BADGE_COLOR_COMPARISON;
-static inline void calc_colorz(const char *color, char *final, size_t len, calculated_number value) {
+static inline void calc_colorz(const char *color, char *final, size_t len, NETDATA_DOUBLE value) {
if(isnan(value) || isinf(value))
value = NAN;
@@ -642,7 +644,7 @@ static inline void calc_colorz(const char *color, char *final, size_t len, calcu
*dc = '\0';
if(dv) {
*dv = '\0';
- calculated_number v;
+ NETDATA_DOUBLE v;
if(!*value_buffer || !strcmp(value_buffer, "null")) {
v = NAN;
@@ -732,7 +734,8 @@ static const char *parse_color_argument(const char *arg, const char *def)
return color_map(arg, def);
}
-void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val) {
+void buffer_svg(BUFFER *wb, const char *label,
+ NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val) {
char value_color_buffer[COLOR_STRING_SIZE + 1]
, value_string[VALUE_STRING_SIZE + 1]
, label_escaped[LABEL_STRING_SIZE + 1]
@@ -750,7 +753,7 @@ void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const ch
value_color = (isnan(value) || isinf(value))?"999":"4c1";
calc_colorz(value_color, value_color_buffer, COLOR_STRING_SIZE, value);
- format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)?calculated_number_fabs(value):value, units, precision);
+ format_value_and_unit(value_string, VALUE_STRING_SIZE, (options & RRDR_OPTION_DISPLAY_ABS)? fabsndd(value):value, units, precision);
if(fixed_width_lbl <= 0 || fixed_width_val <= 0) {
label_width = verdana11_width(label, font_size) + (BADGE_HORIZONTAL_PADDING * 2);
@@ -884,7 +887,8 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
, *fixed_width_lbl_str = NULL
, *fixed_width_val_str = NULL
, *text_color_lbl_str = NULL
- , *text_color_val_str = NULL;
+ , *text_color_val_str = NULL
+ , *group_options = NULL;
int group = RRDR_GROUPING_AVERAGE;
uint32_t options = 0x00000000;
@@ -913,6 +917,7 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
else if(!strcmp(name, "after")) after_str = value;
else if(!strcmp(name, "before")) before_str = value;
else if(!strcmp(name, "points")) points_str = value;
+ else if(!strcmp(name, "group_options")) group_options = value;
else if(!strcmp(name, "group")) {
group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
}
@@ -1096,13 +1101,17 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
else {
time_t latest_timestamp = 0;
int value_is_null = 1;
- calculated_number n = NAN;
+ NETDATA_DOUBLE n = NAN;
ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
// if the collected value is too old, don't calculate its value
if (rrdset_last_entry_t(st) >= (now_realtime_sec() - (st->update_every * st->gap_when_lost_iterations_above)))
- ret = rrdset2value_api_v1(st, w->response.data, &n, (dimensions) ? buffer_tostring(dimensions) : NULL
- , points, after, before, group, 0, options, NULL, &latest_timestamp, &value_is_null, 0);
+ ret = rrdset2value_api_v1(st, w->response.data, &n,
+ (dimensions) ? buffer_tostring(dimensions) : NULL,
+ points, after, before, group, group_options, 0, options,
+ NULL, &latest_timestamp,
+ NULL, NULL, NULL,
+ &value_is_null, NULL, 0, 0);
// if the value cannot be calculated, show empty badge
if (ret != HTTP_RESP_OK) {
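[Editor's note: the badge formatter above keeps its magnitude-based precision ladder; only the type migrates from calculated_number to NETDATA_DOUBLE. A minimal standalone sketch of that ladder, where plain double and snprintf stand in for NETDATA_DOUBLE and snprintfz (those stand-ins are assumptions, not the real macros); note the >=1 and >=0.1 rungs share "%0.2" exactly as in the original:

#include <math.h>
#include <stdio.h>

/* Pick fewer decimals for larger magnitudes, as the badge formatter does. */
static int format_auto_precision(char *buf, size_t len, double value) {
    double a = fabs(value);
    if (a >= 1000)   return snprintf(buf, len, "%0.0f", value);
    if (a >= 10)     return snprintf(buf, len, "%0.1f", value);
    if (a >= 0.1)    return snprintf(buf, len, "%0.2f", value);
    if (a >= 0.01)   return snprintf(buf, len, "%0.4f", value);
    if (a >= 0.001)  return snprintf(buf, len, "%0.5f", value);
    if (a >= 0.0001) return snprintf(buf, len, "%0.6f", value);
    return snprintf(buf, len, "%0.7f", value);
}

int main(void) {
    const double samples[] = { 12345.6, 42.42, 0.5, 0.0042 };
    char buf[64];
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        format_auto_precision(buf, sizeof(buf), samples[i]);
        printf("%10g -> %s\n", samples[i], buf);
    }
    return 0;
}

The negative branch (lstop) and zero trimming of the original are omitted here for brevity.]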
diff --git a/web/api/badges/web_buffer_svg.h b/web/api/badges/web_buffer_svg.h
index 1cf69e20a..4853a8864 100644
--- a/web/api/badges/web_buffer_svg.h
+++ b/web/api/badges/web_buffer_svg.h
@@ -6,8 +6,10 @@
#include "libnetdata/libnetdata.h"
#include "web/server/web_client.h"
-extern void buffer_svg(BUFFER *wb, const char *label, calculated_number value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val);
-extern char *format_value_and_unit(char *value_string, size_t value_string_len, calculated_number value, const char *units, int precision);
+extern void buffer_svg(BUFFER *wb, const char *label,
+ NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val);
+extern char *format_value_and_unit(char *value_string, size_t value_string_len,
+ NETDATA_DOUBLE value, const char *units, int precision);
extern int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url);
diff --git a/web/api/exporters/shell/allmetrics_shell.c b/web/api/exporters/shell/allmetrics_shell.c
index b9b6c904b..615aab43c 100644
--- a/web/api/exporters/shell/allmetrics_shell.c
+++ b/web/api/exporters/shell/allmetrics_shell.c
@@ -33,7 +33,7 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_
if (filter && !simple_pattern_matches(filter, st->name))
continue;
- calculated_number total = 0.0;
+ NETDATA_DOUBLE total = 0.0;
char chart[SHELL_ELEMENT_MAX + 1];
shell_name_copy(chart, st->name?st->name:st->id, SHELL_ELEMENT_MAX);
@@ -48,21 +48,21 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_
char dimension[SHELL_ELEMENT_MAX + 1];
shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX);
- calculated_number n = rd->last_stored_value;
+ NETDATA_DOUBLE n = rd->last_stored_value;
if(isnan(n) || isinf(n))
buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, st->units);
else {
if(rd->multiplier < 0 || rd->divisor < 0) n = -n;
- n = calculated_number_round(n);
+ n = roundndd(n);
if(!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) total += n;
- buffer_sprintf(wb, "NETDATA_%s_%s=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, dimension, n, st->units);
+ buffer_sprintf(wb, "NETDATA_%s_%s=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, dimension, n, st->units);
}
}
}
- total = calculated_number_round(total);
- buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, total, st->units);
+ total = roundndd(total);
+ buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, total, st->units);
rrdset_unlock(st);
}
}
@@ -79,13 +79,13 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_
char alarm[SHELL_ELEMENT_MAX + 1];
shell_name_copy(alarm, rc->name, SHELL_ELEMENT_MAX);
- calculated_number n = rc->value;
+ NETDATA_DOUBLE n = rc->value;
if(isnan(n) || isinf(n))
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rc->units);
else {
- n = calculated_number_round(n);
- buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" CALCULATED_NUMBER_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rc->units);
+ n = roundndd(n);
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rc->units);
}
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status));
@@ -154,7 +154,7 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_s
if(isnan(rd->last_stored_value))
buffer_strcat(wb, "null");
else
- buffer_sprintf(wb, CALCULATED_NUMBER_FORMAT, rd->last_stored_value);
+ buffer_sprintf(wb, NETDATA_DOUBLE_FORMAT, rd->last_stored_value);
buffer_strcat(wb, "\n\t\t\t}");
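[Editor's note: the shell exporter above rounds each last stored value and emits it as a shell-sourceable variable, so a script can eval the response. A hedged sketch of that output shape, with round() and "%0.0f" standing in for roundndd() and NETDATA_DOUBLE_FORMAT_ZERO (both assumptions for this standalone example):

#include <math.h>
#include <stdio.h>

/* Emit NETDATA_<chart>_<dim>="<value>" lines the way the allmetrics
 * shell formatter does, leaving the value empty when it is not a number. */
static void emit_shell_metric(const char *chart, const char *dimension,
                              double value, const char *units) {
    if (isnan(value) || isinf(value))
        printf("NETDATA_%s_%s=\"\" # %s\n", chart, dimension, units);
    else
        printf("NETDATA_%s_%s=\"%0.0f\" # %s\n", chart, dimension, round(value), units);
}

int main(void) {
    emit_shell_metric("SYSTEM_CPU", "USER", 12.7, "percentage");
    emit_shell_metric("SYSTEM_CPU", "IDLE", NAN, "percentage");
    return 0;
}
]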
diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c
index da0a6b583..6d87ca374 100644
--- a/web/api/formatters/csv/csv.c
+++ b/web/api/formatters/csv/csv.c
@@ -63,9 +63,9 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
}
// for each line in the array
- calculated_number total = 1;
+ NETDATA_DOUBLE total = 1;
for(i = start; i != end ;i += step) {
- calculated_number *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
buffer_strcat(wb, betweenlines);
@@ -75,7 +75,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
if((options & RRDR_OPTION_SECONDS) || (options & RRDR_OPTION_MILLISECONDS)) {
// print the timestamp of the line
- buffer_rrd_value(wb, (calculated_number)now);
+ buffer_rrd_value(wb, (NETDATA_DOUBLE)now);
// in ms
if(options & RRDR_OPTION_MILLISECONDS) buffer_strcat(wb, "000");
}
@@ -90,7 +90,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
- calculated_number n = cn[c];
+ NETDATA_DOUBLE n = cn[c];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
@@ -109,7 +109,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
buffer_strcat(wb, separator);
- calculated_number n = cn[c];
+ NETDATA_DOUBLE n = cn[c];
if(co[c] & RRDR_VALUE_EMPTY) {
if(options & RRDR_OPTION_NULL2ZERO)
diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c
index af1156d27..6f07b9aa4 100644
--- a/web/api/formatters/json/json.c
+++ b/web/api/formatters/json/json.c
@@ -158,10 +158,11 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
);
// for each line in the array
- calculated_number total = 1;
+ NETDATA_DOUBLE total = 1;
for(i = start; i != end ;i += step) {
- calculated_number *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+ NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
time_t now = r->t[i];
@@ -209,7 +210,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
buffer_fast_strcat(wb, object_rows_time, object_rows_time_len);
- buffer_rrd_value(wb, (calculated_number)r->t[i]);
+ buffer_rrd_value(wb, (NETDATA_DOUBLE)r->t[i]);
// in ms
if(unlikely(options & RRDR_OPTION_MILLISECONDS))
@@ -222,7 +223,11 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
- calculated_number n = cn[c];
+ NETDATA_DOUBLE n;
+ if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
+ n = ar[c];
+ else
+ n = cn[c];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
@@ -239,14 +244,18 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
- calculated_number n = cn[c];
+ NETDATA_DOUBLE n;
+ if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
+ n = ar[c];
+ else
+ n = cn[c];
buffer_fast_strcat(wb, pre_value, pre_value_len);
if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
buffer_sprintf(wb, "%s%s%s: ", kq, rd->name, kq);
- if(co[c] & RRDR_VALUE_EMPTY) {
+ if(co[c] & RRDR_VALUE_EMPTY && !(options & RRDR_OPTION_INTERNAL_AR)) {
if(unlikely(options & RRDR_OPTION_NULL2ZERO))
buffer_fast_strcat(wb, "0", 1);
else
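[Editor's note: the json formatter above now selects its per-row source array by option: anomaly rates from r->ar[] when RRDR_OPTION_INTERNAL_AR is set, stored values from r->v[] otherwise. A standalone sketch of that selection (the flag value and row layout are assumptions for illustration):

#include <stdio.h>

#define OPT_INTERNAL_AR 0x1 /* assumption: stand-in for RRDR_OPTION_INTERNAL_AR */

/* One row of dimensions: pick the anomaly-rate array or the value array
 * per option, mirroring the cn[]/ar[] choice in rrdr2json above. */
static double row_value(const double *cn, const double *ar, long c, unsigned options) {
    return (options & OPT_INTERNAL_AR) ? ar[c] : cn[c];
}

int main(void) {
    const double cn[] = { 10.0, 20.0,  30.0 }; /* stored values for one row */
    const double ar[] = {  0.0, 12.5, 100.0 }; /* anomaly rates (percent)   */
    for (long c = 0; c < 3; c++)
        printf("dim %ld: value=%g anomaly=%g\n", c,
               row_value(cn, ar, c, 0),
               row_value(cn, ar, c, OPT_INTERNAL_AR));
    return 0;
}
]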
diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c
index 7097a5b77..04cace2fb 100644
--- a/web/api/formatters/json_wrapper.c
+++ b/web/api/formatters/json_wrapper.c
@@ -19,8 +19,23 @@ static int value_list_output(const char *name, void *entry, void *data) {
return 0;
}
+static int fill_formatted_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) {
+ (void)ls;
+ DICTIONARY *dict = (DICTIONARY *)data;
+ char n[RRD_ID_LENGTH_MAX * 2 + 2];
+ char output[RRD_ID_LENGTH_MAX * 2 + 8];
+ char v[RRD_ID_LENGTH_MAX * 2 + 1];
+
+ sanitize_json_string(v, (char *)value, RRD_ID_LENGTH_MAX * 2);
+ int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\", \"%s\"]", name, v);
+ snprintfz(n, RRD_ID_LENGTH_MAX * 2, "%s:%s", name, v);
+ dictionary_set(dict, n, output, len + 1);
+
+ return 1;
+}
+
void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
- QUERY_PARAMS *rrdset_query_data)
+ RRDR_GROUPING group_method, QUERY_PARAMS *rrdset_query_data)
{
struct context_param *context_param_list = rrdset_query_data->context_param_list;
char *chart_label_key = rrdset_query_data->chart_label_key;
@@ -61,7 +76,8 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
" %slast_entry%s: %u,\n"
" %sbefore%s: %u,\n"
" %safter%s: %u,\n"
- " %sdimension_names%s: ["
+ " %sgroup%s: %s%s%s,\n"
+ " %soptions%s: %s"
, kq, kq
, kq, kq, sq, context_mode && temp_rd?r->st->context:r->st->id, sq
, kq, kq, sq, context_mode && temp_rd?r->st->context:r->st->name, sq
@@ -71,7 +87,13 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
, kq, kq, (uint32_t) (context_param_list ? context_param_list->last_entry_t : rrdset_last_entry_t_nolock(r->st))
, kq, kq, (uint32_t)r->before
, kq, kq, (uint32_t)r->after
- , kq, kq);
+ , kq, kq, sq, web_client_api_request_v1_data_group_to_string(group_method), sq
+ , kq, kq, sq);
+
+ web_client_api_request_v1_data_options_to_string(wb, r->internal.query_options);
+
+ buffer_sprintf(wb, "%s,\n %sdimension_names%s: [", sq, kq, kq);
+
if (should_lock)
rrdset_unlock(r->st);
@@ -122,7 +144,6 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
char name[RRD_ID_LENGTH_MAX * 2 + 2];
char output[RRD_ID_LENGTH_MAX * 2 + 8];
- char value[RRD_ID_LENGTH_MAX * 2 + 1];
struct value_output co = {.c = 0, .wb = wb};
@@ -153,19 +174,8 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
for (i = 0, rd = temp_rd ? temp_rd : r->st->dimensions; rd; rd = rd->next) {
st = rd->rrdset;
- if (likely(st->state)) {
- struct label_index *labels = &st->state->labels;
- if (labels->head) {
- netdata_rwlock_rdlock(&labels->labels_rwlock);
- for (struct label *label = labels->head; label; label = label->next) {
- sanitize_json_string(value, label->value, RRD_ID_LENGTH_MAX * 2);
- int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\", \"%s\"]", label->key, value);
- snprintfz(name, RRD_ID_LENGTH_MAX * 2, "%s:%s", label->key, value);
- dictionary_set(dict, name, output, len + 1);
- }
- netdata_rwlock_unlock(&labels->labels_rwlock);
- }
- }
+ if (st->state && st->state->chart_labels)
+ rrdlabels_walkthrough_read(st->state->chart_labels, fill_formatted_callback, dict);
}
dictionary_walkthrough_read(dict, value_list_output, &co);
dictionary_destroy(dict);
@@ -207,8 +217,6 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
char *label_key = NULL;
int keys = 0;
while (pattern && (label_key = simple_pattern_iterate(&pattern))) {
- uint32_t key_hash = simple_hash(label_key);
- struct label *current_label;
if (keys)
buffer_strcat(wb, ", ");
@@ -223,13 +231,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
if (i)
buffer_strcat(wb, ", ");
- current_label = rrdset_lookup_label_key(rd->rrdset, label_key, key_hash);
- if (current_label) {
- buffer_strcat(wb, sq);
- buffer_strcat(wb, current_label->value);
- buffer_strcat(wb, sq);
- } else
- buffer_strcat(wb, "null");
+ rrdlabels_get_value_to_buffer_or_null(rd->rrdset->state->chart_labels, wb, label_key, sq, "null");
i++;
}
if (!i) {
@@ -255,7 +257,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
if(i) buffer_strcat(wb, ", ");
i++;
- calculated_number value = rd->last_stored_value;
+ NETDATA_DOUBLE value = rd->last_stored_value;
if (NAN == value)
buffer_strcat(wb, "null");
else
@@ -280,13 +282,13 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
i = 0;
if(rows) {
- calculated_number total = 1;
+ NETDATA_DOUBLE total = 1;
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
- calculated_number *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
- calculated_number n = cn[c];
+ NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
+ NETDATA_DOUBLE n = cn[c];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
@@ -304,9 +306,9 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
if(i) buffer_strcat(wb, ", ");
i++;
- calculated_number *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
+ NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ (rrdr_rows(r) - 1) * r->d ];
- calculated_number n = cn[c];
+ NETDATA_DOUBLE n = cn[c];
if(co[c] & RRDR_VALUE_EMPTY) {
if(options & RRDR_OPTION_NULL2ZERO)
@@ -341,12 +343,21 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
rrdr_buffer_print_format(wb, format);
+ buffer_sprintf(wb, "%s,\n"
+ " %sdb_points_per_tier%s: [ "
+ , sq
+ , kq, kq
+ );
+
+ for(int tier = 0; tier < storage_tiers ; tier++)
+ buffer_sprintf(wb, "%s%zu", tier>0?", ":"", r->internal.tier_points_read[tier]);
+
+ buffer_strcat(wb, " ]");
+
if((options & RRDR_OPTION_CUSTOM_VARS) && (options & RRDR_OPTION_JSON_WRAP)) {
- buffer_sprintf(wb, "%s,\n %schart_variables%s: ", sq, kq, kq);
+ buffer_sprintf(wb, ",\n %schart_variables%s: ", kq, kq);
health_api_v1_chart_custom_variables2json(r->st, wb);
}
- else
- buffer_sprintf(wb, "%s", sq);
buffer_sprintf(wb, ",\n %sresult%s: ", kq, kq);
@@ -354,6 +365,27 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
//info("JSONWRAPPER(): %s: END", r->st->id);
}
+void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) {
+ (void)r;
+ (void)format;
+
+ char kq[2] = "", // key quote
+ sq[2] = ""; // string quote
+
+ if( options & RRDR_OPTION_GOOGLE_JSON ) {
+ kq[0] = '\0';
+ sq[0] = '\'';
+ }
+ else {
+ kq[0] = '"';
+ sq[0] = '"';
+ }
+
+ if(string_value) buffer_strcat(wb, sq);
+
+ buffer_sprintf(wb, ",\n %sanomaly_rates%s: ", kq, kq);
+}
+
void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value) {
(void)format;
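[Editor's note: the wrapper above now reports how many points each storage tier contributed as a db_points_per_tier JSON array. A minimal sketch of emitting such an array, with fixed counts and stdout as assumptions replacing r->internal.tier_points_read[] and the BUFFER API:

#include <stdio.h>

int main(void) {
    /* assumptions: stand-ins for storage_tiers and tier_points_read[] */
    const size_t tier_points_read[] = { 3600, 600, 60 };
    const int storage_tiers = 3;

    printf("\"db_points_per_tier\": [ ");
    for (int tier = 0; tier < storage_tiers; tier++)
        printf("%s%zu", tier > 0 ? ", " : "", tier_points_read[tier]);
    printf(" ]\n");
    return 0;
}
]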
diff --git a/web/api/formatters/json_wrapper.h b/web/api/formatters/json_wrapper.h
index 65dbd5b65..bfadc883e 100644
--- a/web/api/formatters/json_wrapper.h
+++ b/web/api/formatters/json_wrapper.h
@@ -4,9 +4,12 @@
#define NETDATA_API_FORMATTER_JSON_WRAPPER_H
#include "rrd2json.h"
+#include "web/api/queries/query.h"
+
extern void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
- QUERY_PARAMS *query_params);
+ RRDR_GROUPING group_method, QUERY_PARAMS *query_params);
+extern void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
extern void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H
diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c
index 1de6be4e3..7aa478d95 100644
--- a/web/api/formatters/rrd2json.c
+++ b/web/api/formatters/rrd2json.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "web/api/web_api_v1.h"
+#include "database/storage_engine.h"
static inline void free_single_rrdrim(ONEWAYALLOC *owa, RRDDIM *temp_rd, int archive_mode)
{
@@ -18,7 +19,18 @@ static inline void free_single_rrdrim(ONEWAYALLOC *owa, RRDDIM *temp_rd, int arc
}
}
- onewayalloc_freez(owa, temp_rd->state);
+ for(int tier = 0; tier < storage_tiers ;tier++) {
+ if(!temp_rd->tiers[tier]) continue;
+
+ if(archive_mode) {
+ STORAGE_ENGINE *eng = storage_engine_get(temp_rd->tiers[tier]->mode);
+ if (eng)
+ eng->api.free(temp_rd->tiers[tier]->db_metric_handle);
+ }
+
+ onewayalloc_freez(owa, temp_rd->tiers[tier]);
+ }
+
onewayalloc_freez(owa, temp_rd);
}
@@ -50,10 +62,22 @@ void rebuild_context_param_list(ONEWAYALLOC *owa, struct context_param *context_
RRDDIM *temp_rd = context_param_list->rd;
RRDDIM *new_rd_list = NULL, *t;
int is_archived = (context_param_list->flags & CONTEXT_FLAGS_ARCHIVE);
+
+ RRDSET *st = temp_rd->rrdset;
+ RRDSET *last_st = st;
+ time_t last_entry_t = is_archived ? st->last_entry_t : rrdset_last_entry_t(st);
+ time_t last_last_entry_t = last_entry_t;
while (temp_rd) {
t = temp_rd->next;
- RRDSET *st = temp_rd->rrdset;
- time_t last_entry_t = is_archived ? st->last_entry_t : rrdset_last_entry_t(st);
+
+ st = temp_rd->rrdset;
+ if (st == last_st) {
+ last_entry_t = last_last_entry_t;
+ }else {
+ last_entry_t = is_archived ? st->last_entry_t : rrdset_last_entry_t(st);
+ last_last_entry_t = last_entry_t;
+ last_st = st;
+ }
if (last_entry_t >= after_requested) {
temp_rd->next = new_rd_list;
@@ -86,10 +110,15 @@ void build_context_param_list(ONEWAYALLOC *owa, struct context_param **param_lis
(*param_list)->last_entry_t = MAX((*param_list)->last_entry_t, rrdset_last_entry_t_nolock(st));
rrddim_foreach_read(rd1, st) {
- RRDDIM *rd = onewayalloc_memdupz(owa, rd1, rd1->memsize);
+ RRDDIM *rd = onewayalloc_memdupz(owa, rd1, sizeof(RRDDIM));
rd->id = onewayalloc_strdupz(owa, rd1->id);
rd->name = onewayalloc_strdupz(owa, rd1->name);
- rd->state = onewayalloc_memdupz(owa, rd1->state, sizeof(*rd->state));
+ for(int tier = 0; tier < storage_tiers ;tier++) {
+ if(rd1->tiers[tier])
+ rd->tiers[tier] = onewayalloc_memdupz(owa, rd1->tiers[tier], sizeof(*rd->tiers[tier]));
+ else
+ rd->tiers[tier] = NULL;
+ }
rd->next = (*param_list)->rd;
(*param_list)->rd = rd;
}
@@ -152,24 +181,32 @@ void rrdr_buffer_print_format(BUFFER *wb, uint32_t format) {
int rrdset2value_api_v1(
RRDSET *st
, BUFFER *wb
- , calculated_number *n
+ , NETDATA_DOUBLE *n
, const char *dimensions
, long points
, long long after
, long long before
, int group_method
+ , const char *group_options
, long group_time
, uint32_t options
, time_t *db_after
, time_t *db_before
+ , size_t *db_points_read
+ , size_t *db_points_per_tier
+ , size_t *result_points_generated
, int *value_is_null
+ , NETDATA_DOUBLE *anomaly_rate
, int timeout
+ , int tier
) {
int ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
ONEWAYALLOC *owa = onewayalloc_create(0);
- RRDR *r = rrd2rrdr(owa, st, points, after, before, group_method, group_time, options, dimensions, NULL, timeout);
+ RRDR *r = rrd2rrdr(owa, st, points, after, before,
+ group_method, group_time, options, dimensions, NULL,
+ group_options, timeout, tier);
if(!r) {
if(value_is_null) *value_is_null = 1;
@@ -177,9 +214,18 @@ int rrdset2value_api_v1(
goto cleanup;
}
- if(rrdr_rows(r) == 0) {
- rrdr_free(owa, r);
+ if(db_points_read)
+ *db_points_read += r->internal.db_points_read;
+
+ if(db_points_per_tier) {
+ for(int t = 0; t < storage_tiers ;t++)
+ db_points_per_tier[t] += r->internal.tier_points_read[t];
+ }
+
+ if(result_points_generated)
+ *result_points_generated += r->internal.result_points_generated;
+ if(rrdr_rows(r) == 0) {
if(db_after) *db_after = 0;
if(db_before) *db_before = 0;
if(value_is_null) *value_is_null = 1;
@@ -199,7 +245,7 @@ int rrdset2value_api_v1(
if(db_before) *db_before = r->before;
long i = (!(options & RRDR_OPTION_REVERSED))?rrdr_rows(r) - 1:0;
- *n = rrdr2value(r, i, options, value_is_null, NULL);
+ *n = rrdr2value(r, i, options, value_is_null, anomaly_rate, NULL);
ret = HTTP_RESP_OK;
cleanup:
@@ -218,9 +264,11 @@ int rrdset2anything_api_v1(
, long long after
, long long before
, int group_method
+ , const char *group_options
, long group_time
, uint32_t options
, time_t *latest_timestamp
+ , int tier
)
{
BUFFER *wb = query_params->wb;
@@ -238,7 +286,8 @@ int rrdset2anything_api_v1(
options,
dimensions ? buffer_tostring(dimensions) : NULL,
query_params->context_param_list,
- query_params->timeout);
+ group_options,
+ query_params->timeout, tier);
if(!r) {
buffer_strcat(wb, "Cannot generate output with these parameters on this chart.");
return HTTP_RESP_INTERNAL_SERVER_ERROR;
@@ -249,7 +298,7 @@ int rrdset2anything_api_v1(
return HTTP_RESP_BACKEND_FETCH_FAILED;
}
- if (st && st->state && st->state->is_ar_chart)
+ if (st->state && st->state->is_ar_chart)
ml_process_rrdr(r, query_params->max_anomaly_rates);
RRDDIM *temp_rd = query_params->context_param_list ? query_params->context_param_list->rd : NULL;
@@ -266,7 +315,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_SSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
rrdr2ssv(r, wb, options, "", " ", "", temp_rd);
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
@@ -279,7 +328,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_SSV_COMMA:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
rrdr2ssv(r, wb, options, "", ",", "", temp_rd);
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
@@ -292,7 +341,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_JS_ARRAY:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 0, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
rrdr2ssv(r, wb, options, "[", ",", "]", temp_rd);
rrdr_json_wrapper_end(r, wb, format, options, 0);
}
@@ -305,7 +354,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_CSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
rrdr2csv(r, wb, format, options, "", ",", "\\n", "", temp_rd);
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
@@ -318,7 +367,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_CSV_MARKDOWN:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
rrdr2csv(r, wb, format, options, "", "|", "\\n", "", temp_rd);
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
@@ -331,7 +380,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_CSV_JSON_ARRAY:
wb->contenttype = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP) {
- rrdr_json_wrapper_begin(r, wb, format, options, 0, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
buffer_strcat(wb, "[\n");
rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n", temp_rd);
buffer_strcat(wb, "\n]");
@@ -348,7 +397,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_TSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
rrdr2csv(r, wb, format, options, "", "\t", "\\n", "", temp_rd);
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
@@ -361,7 +410,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_HTML:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n");
rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", "", temp_rd);
buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n");
@@ -379,7 +428,7 @@ int rrdset2anything_api_v1(
wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
rrdr2json(r, wb, options, 1, query_params->context_param_list);
@@ -391,7 +440,7 @@ int rrdset2anything_api_v1(
wb->contenttype = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
rrdr2json(r, wb, options, 1, query_params->context_param_list);
@@ -402,7 +451,7 @@ int rrdset2anything_api_v1(
case DATASOURCE_JSONP:
wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
rrdr2json(r, wb, options, 0, query_params->context_param_list);
@@ -415,12 +464,17 @@ int rrdset2anything_api_v1(
wb->contenttype = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
rrdr2json(r, wb, options, 0, query_params->context_param_list);
- if(options & RRDR_OPTION_JSON_WRAP)
+ if(options & RRDR_OPTION_JSON_WRAP) {
+ if(options & RRDR_OPTION_RETURN_JWAR) {
+ rrdr_json_wrapper_anomaly_rates(r, wb, format, options, 0);
+ rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, 0, query_params->context_param_list);
+ }
rrdr_json_wrapper_end(r, wb, format, options, 0);
+ }
break;
}
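[Editor's note: per the DATASOURCE_JSON hunk above, when both JSON_WRAP and RETURN_JWAR are set the result is rendered twice, once with values and once more with RRDR_OPTION_INTERNAL_AR forced on so the same rows carry anomaly rates. A control-flow sketch under assumed flag values (the real RRDR_OPTION_* bits differ):

#include <stdio.h>

/* assumptions: arbitrary bits standing in for the RRDR_OPTION_* flags */
#define OPT_JSON_WRAP   0x1
#define OPT_RETURN_JWAR 0x2
#define OPT_INTERNAL_AR 0x4

static void render(unsigned options) {
    printf("pass (options=0x%x): %s\n", options,
           (options & OPT_INTERNAL_AR) ? "anomaly rates" : "values");
}

int main(void) {
    unsigned options = OPT_JSON_WRAP | OPT_RETURN_JWAR;
    render(options);                        /* "result": the values        */
    if ((options & OPT_JSON_WRAP) && (options & OPT_RETURN_JWAR))
        render(options | OPT_INTERNAL_AR);  /* "anomaly_rates": same rows  */
    return 0;
}
]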
diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h
index 60bed5b90..6be53ff8a 100644
--- a/web/api/formatters/rrd2json.h
+++ b/web/api/formatters/rrd2json.h
@@ -67,33 +67,41 @@ extern void rrdr_buffer_print_format(BUFFER *wb, uint32_t format);
extern int rrdset2anything_api_v1(
ONEWAYALLOC *owa
, RRDSET *st
- ,
- QUERY_PARAMS *query_params, BUFFER *dimensions
+ , QUERY_PARAMS *query_params
+ , BUFFER *dimensions
, uint32_t format
, long points
, long long after
, long long before
, int group_method
+ , const char *group_options
, long group_time
, uint32_t options
, time_t *latest_timestamp
+ , int tier
);
extern int rrdset2value_api_v1(
RRDSET *st
, BUFFER *wb
- , calculated_number *n
+ , NETDATA_DOUBLE *n
, const char *dimensions
, long points
, long long after
, long long before
, int group_method
+ , const char *group_options
, long group_time
, uint32_t options
, time_t *db_after
, time_t *db_before
+ , size_t *db_points_read
+ , size_t *db_points_per_tier
+ , size_t *result_points_generated
, int *value_is_null
+ , NETDATA_DOUBLE *anomaly_rate
, int timeout
+ , int tier
);
extern void build_context_param_list(ONEWAYALLOC *owa, struct context_param **param_list, RRDSET *st);
diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c
index c83b22e63..de8d87bae 100644
--- a/web/api/formatters/rrdset2json.c
+++ b/web/api/formatters/rrdset2json.c
@@ -4,32 +4,22 @@
void chart_labels2json(RRDSET *st, BUFFER *wb, size_t indentation)
{
+ if(unlikely(!st->state || !st->state->chart_labels))
+ return;
+
char tabs[11];
- struct label_index *labels = &st->state->labels;
if (indentation > 10)
indentation = 10;
tabs[0] = '\0';
while (indentation) {
- strcat(tabs, "\t");
+ strcat(tabs, "\t\t");
indentation--;
}
- int count = 0;
- netdata_rwlock_rdlock(&labels->labels_rwlock);
- for (struct label *label = labels->head; label; label = label->next) {
- if(count > 0) buffer_strcat(wb, ",\n");
- buffer_strcat(wb, tabs);
-
- char value[CONFIG_MAX_VALUE * 2 + 1];
- sanitize_json_string(value, label->value, CONFIG_MAX_VALUE * 2);
- buffer_sprintf(wb, "\"%s\": \"%s\"", label->key, value);
-
- count++;
- }
+ rrdlabels_to_buffer(st->state->chart_labels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
buffer_strcat(wb, "\n");
- netdata_rwlock_unlock(&labels->labels_rwlock);
}
// generate JSON for the /api/v1/chart API call
@@ -95,14 +85,14 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
"\t\t\t\"dimensions\": {\n",
st->update_every);
- unsigned long memory = st->memsize;
+ unsigned long memory = sizeof(RRDSET) + st->memsize;
size_t dimensions = 0;
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN) || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) continue;
- memory += rd->memsize;
+ memory += sizeof(RRDDIM) + rd->memsize;
if (dimensions)
buffer_strcat(wb, ",\n\t\t\t\t\"");
diff --git a/web/api/formatters/ssv/ssv.c b/web/api/formatters/ssv/ssv.c
index 8d3ddbfdf..850182da1 100644
--- a/web/api/formatters/ssv/ssv.c
+++ b/web/api/formatters/ssv/ssv.c
@@ -17,7 +17,7 @@ void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, con
// for each line in the array
for(i = start; i != end ;i += step) {
int all_values_are_null = 0;
- calculated_number v = rrdr2value(r, i, options, &all_values_are_null, temp_rd);
+ NETDATA_DOUBLE v = rrdr2value(r, i, options, &all_values_are_null, NULL, temp_rd);
if(likely(i != start)) {
if(r->min > v) r->min = v;
diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c
index 9ac91f509..30e00c068 100644
--- a/web/api/formatters/value/value.c
+++ b/web/api/formatters/value/value.c
@@ -3,25 +3,25 @@
#include "value.h"
-inline calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, RRDDIM *temp_rd) {
- if (r->st_needs_lock)
- rrdset_check_rdlock(r->st);
-
+inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate, RRDDIM *temp_rd) {
long c;
RRDDIM *d;
- calculated_number *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
+ NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
- calculated_number sum = 0, min = 0, max = 0, v;
+ NETDATA_DOUBLE sum = 0, min = 0, max = 0, v;
int all_null = 1, init = 1;
- calculated_number total = 1;
+ NETDATA_DOUBLE total = 1;
+ NETDATA_DOUBLE total_anomaly_rate = 0;
+
int set_min_max = 0;
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
for (c = 0, d = temp_rd ? temp_rd : r->st->dimensions; d && c < r->d; c++, d = d->next) {
- calculated_number n = cn[c];
+ NETDATA_DOUBLE n = cn[c];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
@@ -38,7 +38,7 @@ inline calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
- calculated_number n = cn[c];
+ NETDATA_DOUBLE n = cn[c];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
n = -n;
@@ -74,6 +74,13 @@ inline calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *
if(n < min) min = n;
if(n > max) max = n;
+
+ total_anomaly_rate += ar[c];
+ }
+
+ if(anomaly_rate) {
+ if(!r->d) *anomaly_rate = 0;
+ else *anomaly_rate = total_anomaly_rate / r->d;
}
if(unlikely(all_null)) {
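[Editor's note: rrdr2value above now also averages a row's per-dimension anomaly rates into a single number when the caller passes an anomaly_rate pointer. A standalone sketch of that averaging, with a plain double array replacing the RRDR internals (an assumption; the original also skips hidden dimensions, omitted here):

#include <stdio.h>

/* Average a row's per-dimension anomaly rates, returning 0 when the
 * result has no dimensions, as rrdr2value does above. */
static double row_anomaly_rate(const double *ar, long d) {
    if (!d) return 0.0;
    double total = 0.0;
    for (long c = 0; c < d; c++)
        total += ar[c];
    return total / d;
}

int main(void) {
    const double ar[] = { 0.0, 50.0, 100.0 };
    printf("row anomaly rate: %g%%\n", row_anomaly_rate(ar, 3));
    return 0;
}
]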
diff --git a/web/api/formatters/value/value.h b/web/api/formatters/value/value.h
index 2d6bd1242..fc1c7bf08 100644
--- a/web/api/formatters/value/value.h
+++ b/web/api/formatters/value/value.h
@@ -5,6 +5,6 @@
#include "../rrd2json.h"
-extern calculated_number rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, RRDDIM *temp_rd);
+extern NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate, RRDDIM *temp_rd);
#endif //NETDATA_API_FORMATTER_VALUE_H
diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json
index 97427d323..029783b55 100644
--- a/web/api/netdata-swagger.json
+++ b/web/api/netdata-swagger.json
@@ -48,7 +48,7 @@
"/chart": {
"get": {
"summary": "Get info about a specific chart",
- "description": "The Chart endpoint returns detailed information about a chart.",
+ "description": "The chart endpoint returns detailed information about a chart.",
"parameters": [
{
"name": "chart",
@@ -82,6 +82,219 @@
}
}
},
+ "/contexts": {
+ "get": {
+ "summary": "Get a list of all contexts available at the server",
+        "description": "The contexts endpoint returns a summary of all contexts stored in the netdata server.",
+ "parameters": [
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "full",
+ "all",
+ "charts",
+ "dimensions",
+ "labels",
+ "uuids",
+ "queue",
+ "flags",
+ "deleted",
+ "deepscan"
+ ]
+ },
+ "default": [
+ "full"
+ ]
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+          "description": "limit the results to contexts having data after this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+          "description": "limit the results to contexts having data before this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "chart_label_key",
+ "in": "query",
+          "description": "a simple pattern matching chart label keys (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "chart_labels_filter",
+ "in": "query",
+          "description": "a simple pattern matching chart label keys and values (use colon for equality, comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "dimensions",
+ "in": "query",
+ "description": "a simple pattern matching dimensions (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "An array of contexts.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/context_summary"
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "/context": {
+ "get": {
+ "summary": "Get info about a specific context",
+ "description": "The context endpoint returns detailed information about a given context.",
+ "parameters": [
+ {
+ "name": "context",
+ "in": "query",
+ "description": "The id of the context as returned by the /contexts call.",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "format": "as returned by /contexts",
+ "default": "system.cpu"
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "full",
+ "all",
+ "charts",
+ "dimensions",
+ "labels",
+ "uuids",
+ "queue",
+ "flags",
+ "deleted",
+ "deepscan"
+ ]
+ },
+ "default": [
+ "full"
+ ]
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+          "description": "limit the results to contexts having data after this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+          "description": "limit the results to contexts having data before this timestamp.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "chart_label_key",
+ "in": "query",
+          "description": "a simple pattern matching chart label keys (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "chart_labels_filter",
+ "in": "query",
+          "description": "a simple pattern matching chart label keys and values (use colon for equality, comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "dimensions",
+ "in": "query",
+ "description": "a simple pattern matching dimensions (use comma or pipe as separator)",
+ "required": false,
+ "allowEmptyValue": true,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "A javascript object with detailed information about the context.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/context"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "No context id was supplied in the request."
+ },
+ "404": {
+ "description": "No context with the given id is found."
+ }
+ }
+ }
+ },
"/alarm_variables": {
"get": {
"summary": "List variables available to configure alarms for a chart",
@@ -236,12 +449,54 @@
"median",
"stddev",
"sum",
- "incremental-sum"
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
],
"default": "average"
}
},
{
+ "name": "group_options",
+ "in": "query",
+ "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
"name": "gtime",
"in": "query",
"description": "The grouping number of seconds. This is used in conjunction with group=average to change the units of metrics (ie when the data is per-second, setting gtime=60 will turn them to per-minute).",
@@ -319,7 +574,8 @@
"match-ids",
"match-names",
"showcustomvars",
- "allow_past"
+ "allow_past",
+ "anomaly-bit"
]
},
"default": [
@@ -462,7 +718,39 @@
"median",
"stddev",
"sum",
- "incremental-sum"
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
],
"default": "average"
}
@@ -484,7 +772,8 @@
"absolute-sum",
"null2zero",
"percentage",
- "unaligned"
+ "unaligned",
+ "anomaly-bit"
]
},
"default": [
@@ -908,7 +1197,7 @@
"/alarms_values": {
"get": {
"summary": "Get a list of active or raised alarms on the server",
- "description": "The alarms_values endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing \"?all\", all the enabled alarms are returned. This option output differs from `/alarms` in the number of variables delivered. This endpoint gives to user `id`, `value`, `last_updated` time and alarm `status`.",
+        "description": "The alarms_values endpoint returns the list of all raised or enabled alarms on the netdata server. Called without any parameters, the raised alarms in state WARNING or CRITICAL are returned. By passing '?all', all the enabled alarms are returned. The output of this option differs from `/alarms` in the number of variables delivered. This endpoint gives the user `id`, `value`, `last_updated` time, and alarm `status`.",
"parameters": [
{
"name": "all",
@@ -1115,7 +1404,7 @@
"/aclk": {
"get": {
"summary": "Get information about current ACLK state",
- "description": "aclk endpoint returns detailed information about current state of ACLK (Agent to Cloud communication).",
+ "description": "ACLK endpoint returns detailed information about current state of ACLK (Agent to Cloud communication).",
"responses": {
"200": {
"description": "JSON object with ACLK information.",
@@ -1129,6 +1418,446 @@
}
}
}
+ },
+ "/metric_correlations": {
+ "get": {
+ "summary": "Analyze all the metrics to find their correlations",
+        "description": "THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint. Given two time-windows (baseline, highlight), it goes through all the available metrics, querying both windows, and tries to find how the two windows relate to each other. It supports multiple algorithms to do so. The result is a list of all metrics evaluated, weighted from 0.0 (the two windows are more different) to 1.0 (the two windows are similar). The algorithm automatically adjusts the baseline window to be a power-of-two multiple of the highlighted one (1, 2, 4, 8, etc.).",
+ "parameters": [
+ {
+ "name": "baseline_after",
+ "in": "query",
+          "description": "This parameter can either be an absolute timestamp specifying the starting point of the baseline window, or a relative number of seconds (negative, relative to parameter baseline_before). Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -300
+ }
+ },
+ {
+ "name": "baseline_before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the baseline window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+          "description": "This parameter can either be an absolute timestamp specifying the starting point of the highlighted window, or a relative number of seconds (negative, relative to parameter highlight_before). Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the highlighted window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "points",
+ "in": "query",
+          "description": "The number of points to be evaluated for the highlighted window. The baseline window will be adjusted automatically to receive a proportional number of points.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 500
+ }
+ },
+ {
+ "name": "method",
+ "in": "query",
+ "description": "the algorithm to run",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "ks2",
+ "volume"
+ ],
+ "default": "ks2"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+          "description": "Cancel the query if it takes more than this amount of milliseconds.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 60000
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "min2max",
+ "abs",
+ "absolute",
+ "absolute-sum",
+ "null2zero",
+ "percentage",
+ "unaligned",
+ "allow_past",
+ "nonzero",
+ "anomaly-bit",
+ "raw"
+ ]
+ },
+ "default": [
+ "null2zero",
+ "allow_past",
+ "nonzero",
+ "unaligned"
+ ]
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+          "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameter defines the method of grouping. Methods supported: \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "group_options",
+ "in": "query",
+ "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "JSON object with weights for each chart and dimension.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/metric_correlations"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "The given parameters are invalid."
+ },
+ "403": {
+ "description": "metrics correlations are not enabled on this Netdata Agent."
+ },
+ "404": {
+ "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
+ },
+ "504": {
+ "description": "Timeout - the query took too long and has been cancelled."
+ }
+ }
+ }
+ },
+ "/weights": {
+ "get": {
+ "summary": "Analyze all the metrics using an algorithm and score them accordingly",
+ "description": "This endpoint goes through all metrics and scores them according to an algorithm.",
+ "parameters": [
+ {
+ "name": "baseline_after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of baseline window, or a relative number of seconds (negative, relative to parameter baseline_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds). This parameter is used in KS2 and VOLUME algorithms.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -300
+ }
+ },
+ {
+ "name": "baseline_before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the baseline window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds). This parameter is used in KS2 and VOLUME algorithms.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "after",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the starting point of highlighted window, or a relative number of seconds (negative, relative to parameter highlight_before). Netdata will assume it is a relative number if it is less that 3 years (in seconds).",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": -60
+ }
+ },
+ {
+ "name": "before",
+ "in": "query",
+ "description": "This parameter can either be an absolute timestamp specifying the ending point of the highlighted window, or a relative number of seconds (negative), relative to the last collected timestamp. Netdata will assume it is a relative number if it is less than 3 years (in seconds).",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 0
+ }
+ },
+ {
+ "name": "context",
+ "in": "query",
+ "description": "A simple pattern matching the contexts to evaluate.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "points",
+ "in": "query",
+ "description": "The number of points to be evaluated for the highlighted window. The baseline window will be adjusted automatically to receive a proportional amount of points. This parameter is only used by the KS2 algorithm.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 500
+ }
+ },
+ {
+ "name": "method",
+ "in": "query",
+ "description": "the algorithm to run",
+ "required": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "ks2",
+ "volume",
+ "anomaly-rate"
+ ],
+ "default": "anomaly-rate"
+ }
+ },
+ {
+ "name": "tier",
+ "in": "query",
+ "description": "Use the specified database tier",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "Cancel the query if to takes more that this amount of milliseconds.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 60000
+ }
+ },
+ {
+ "name": "options",
+ "in": "query",
+ "description": "Options that affect data generation.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "min2max",
+ "abs",
+ "absolute",
+ "absolute-sum",
+ "null2zero",
+ "percentage",
+ "unaligned",
+ "nonzero",
+ "anomaly-bit",
+ "raw"
+ ]
+ },
+ "default": [
+ "null2zero",
+ "nonzero",
+ "unaligned"
+ ]
+ }
+ },
+ {
+ "name": "group",
+ "in": "query",
+ "description": "The grouping method. If multiple collected values are to be grouped in order to return fewer points, this parameters defines the method of grouping. methods supported \"min\", \"max\", \"average\", \"sum\", \"incremental-sum\". \"max\" is actually calculated on the absolute value collected (so it works for both positive and negative dimensions to return the most extreme value in either direction).",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string",
+ "enum": [
+ "min",
+ "max",
+ "average",
+ "median",
+ "stddev",
+ "sum",
+ "incremental-sum",
+ "ses",
+ "des",
+ "cv",
+ "countif",
+ "percentile",
+ "percentile25",
+ "percentile50",
+ "percentile75",
+ "percentile80",
+ "percentile90",
+ "percentile95",
+ "percentile97",
+ "percentile98",
+ "percentile99",
+ "trimmed-mean",
+ "trimmed-mean1",
+ "trimmed-mean2",
+ "trimmed-mean3",
+ "trimmed-mean5",
+ "trimmed-mean10",
+ "trimmed-mean15",
+ "trimmed-mean20",
+ "trimmed-mean25",
+ "trimmed-median",
+ "trimmed-median1",
+ "trimmed-median2",
+ "trimmed-median3",
+ "trimmed-median5",
+ "trimmed-median10",
+ "trimmed-median15",
+ "trimmed-median20",
+ "trimmed-median25"
+ ],
+ "default": "average"
+ }
+ },
+ {
+ "name": "group_options",
+ "in": "query",
+ "description": "When the group function supports additional parameters, this field can be used to pass them to it. Currently only \"countif\" supports this.",
+ "required": false,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "JSON object with weights for each context, chart and dimension.",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/weights"
+ }
+ }
+ }
+ },
+ "400": {
+ "description": "The given parameters are invalid."
+ },
+ "403": {
+ "description": "metrics correlations are not enabled on this Netdata Agent."
+ },
+ "404": {
+ "description": "No charts could be found, or the method that correlated the metrics did not produce any result."
+ },
+ "504": {
+ "description": "Timeout - the query took too long and has been cancelled."
+ }
+ }
+ }
}
},
"servers": [
@@ -1267,7 +1996,7 @@
"stream_compression": {
"type": "boolean",
"description": "Stream transmission compression method.",
- "example": "true"
+ "example": true
},
"labels": {
"type": "object",
@@ -1480,6 +2209,89 @@
}
}
},
+ "context_summary": {
+ "type": "object",
+ "properties": {
+ "hostname": {
+ "type": "string",
+ "description": "The hostname of the netdata server."
+ },
+ "machine_guid": {
+ "type": "string",
+ "description": "The unique installation id of this netdata server."
+ },
+ "node_id": {
+ "type": "string",
+ "description": "The unique node id of this netdata server at the hub.",
+ "example": "nightly"
+ },
+ "claim_id": {
+ "type": "string",
+ "description": "The unique handshake id of this netdata server and the hub."
+ },
+ "host_labels": {
+ "type": "object",
+ "description": "The host labels associated with this netdata server."
+ },
+ "context": {
+ "type": "object",
+ "description": "An object containing all the context objects available at the netdata server. This is used as an indexed array. The key of each context object is the id of the context.",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/context"
+ }
+ }
+ }
+ },
+ "context": {
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "string",
+ "description": "The version of this context. The number are not sequential, but bigger numbers depict a newer object."
+ },
+ "hub_version": {
+ "type": "string",
+ "description": "The version of this context, as known by hub."
+ },
+ "family": {
+ "type": "string",
+ "description": "The family of the context. When multiple charts of a context have different families, the netdata server replaces the different parts with [x], so that the context can have only one family."
+ },
+ "title": {
+ "type": "string",
+ "description": "The title of the context. When multiple charts of a context have different titles, the netdata server replaces the different parts with [x], so that the context can have only one title."
+ },
+ "priority": {
+ "type": "number",
+ "description": "The relative priority of the context. When multiple contexts have different priorities, the minimum among them is selected as the priority of the context."
+ },
+ "units": {
+ "type": "string",
+ "description": "The unit of measurement for the values of all dimensions of the context. If multiple charts of context have different units, the latest collected is selected."
+ },
+ "chart_type": {
+ "type": "string",
+ "description": "The chart type.",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ]
+ },
+ "first_time_t": {
+ "type": "number",
+ "description": "The UNIX timestamp of the first entry (the oldest) in the database."
+ },
+ "last_time_t": {
+ "type": "number",
+ "description": "The UNIX timestamp of the latest entry in the database."
+ },
+ "charts": {
+ "type": "object",
+ "description": "An object containing all the charts available for the chart. This is used as an indexed array. For each pair in the dictionary, the key is the id of the chart and the value provides all details about the chart."
+ }
+ }
+ },
"alarm_variables": {
"type": "object",
"properties": {
@@ -2135,7 +2947,7 @@
"type": "object",
"properties": {
"aclk-available": {
- "type": "boolean",
+ "type": "string",
"description": "Describes whether this agent is capable of connection to the Cloud. False means agent has been built without ACLK component either on purpose (user choice) or due to missing dependency."
},
"aclk-version": {
@@ -2153,7 +2965,7 @@
"type": "boolean",
"description": "Informs whether this agent has been added to a space in the cloud (User has to perform claiming). If false (user didn't perform claiming) agent will never attempt any cloud connection."
},
- "claimed-id": {
+ "claimed_id": {
"type": "string",
"format": "uuid",
"description": "Unique ID this agent uses to identify when connecting to cloud"
@@ -2171,7 +2983,245 @@
]
}
}
+ },
+ "metric_correlations": {
+ "type": "object",
+ "properties": {
+ "after": {
+ "description": "the start time of the highlighted window",
+ "type": "integer"
+ },
+ "before": {
+ "description": "the end time of the highlighted window",
+ "type": "integer"
+ },
+ "duration": {
+ "description": "the duration of the highlighted window",
+ "type": "integer"
+ },
+ "points": {
+ "description": "the points of the highlighted window",
+ "type": "integer"
+ },
+ "baseline_after": {
+ "description": "the start time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_before": {
+ "description": "the end time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_duration": {
+ "description": "the duration of the baseline window",
+ "type": "integer"
+ },
+ "baseline_points": {
+ "description": "the points of the baseline window",
+ "type": "integer"
+ },
+ "group": {
+ "description": "the grouping method across time",
+ "type": "string"
+ },
+ "method": {
+ "description": "the correlation method used",
+ "type": "string"
+ },
+ "options": {
+ "description": "a comma separated list of the query options set",
+ "type": "string"
+ },
+ "correlated_dimensions": {
+ "description": "the number of dimensions returned in the result"
+ },
+ "total_dimensions_count": {
+ "description": "the total number of dimensions evaluated",
+ "type": "integer"
+ },
+ "statistics": {
+ "type": "object",
+ "properties": {
+ "query_time_ms": {
+ "type": "number"
+ },
+ "db_queries": {
+ "type": "integer"
+ },
+ "db_points_read": {
+ "type": "integer"
+ },
+ "query_result_points": {
+ "type": "integer"
+ },
+ "binary_searches": {
+ "type": "integer"
+ }
+ }
+ },
+ "correlated_charts": {
+ "type": "object",
+ "description": "An object containing chart objects with their metrics correlations.",
+ "properties": {
+ "chart-id1": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "type": "string"
+ },
+ "dimensions": {
+ "type": "object",
+ "properties": {
+ "dimension1-name": {
+ "type": "number"
+ },
+ "dimension2-name": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ },
+ "chart-id2": {
+ "type": "object",
+ "properties": {
+ "context": {
+ "type": "string"
+ },
+ "dimensions": {
+ "type": "object",
+ "properties": {
+ "dimension1-name": {
+ "type": "number"
+ },
+ "dimension2-name": {
+ "type": "number"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "weights": {
+ "type": "object",
+ "properties": {
+ "after": {
+ "description": "the start time of the highlighted window",
+ "type": "integer"
+ },
+ "before": {
+ "description": "the end time of the highlighted window",
+ "type": "integer"
+ },
+ "duration": {
+ "description": "the duration of the highlighted window",
+ "type": "integer"
+ },
+ "points": {
+ "description": "the points of the highlighted window",
+ "type": "integer"
+ },
+ "baseline_after": {
+ "description": "the start time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_before": {
+ "description": "the end time of the baseline window",
+ "type": "integer"
+ },
+ "baseline_duration": {
+ "description": "the duration of the baseline window",
+ "type": "integer"
+ },
+ "baseline_points": {
+ "description": "the points of the baseline window",
+ "type": "integer"
+ },
+ "group": {
+ "description": "the grouping method across time",
+ "type": "string"
+ },
+ "method": {
+ "description": "the correlation method used",
+ "type": "string"
+ },
+ "options": {
+ "description": "a comma separated list of the query options set",
+ "type": "string"
+ },
+ "correlated_dimensions": {
+ "description": "the number of dimensions returned in the result"
+ },
+ "total_dimensions_count": {
+ "description": "the total number of dimensions evaluated",
+ "type": "integer"
+ },
+ "statistics": {
+ "type": "object",
+ "properties": {
+ "query_time_ms": {
+ "type": "number"
+ },
+ "db_queries": {
+ "type": "integer"
+ },
+ "db_points_read": {
+ "type": "integer"
+ },
+ "query_result_points": {
+ "type": "integer"
+ },
+ "binary_searches": {
+ "type": "integer"
+ }
+ }
+ },
+ "contexts": {
+ "description": "A dictionary of weighted context objects.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/weighted_context"
+ }
+ }
+ }
+ },
+ "weighted_context": {
+ "type": "object",
+ "properties": {
+ "weight": {
+ "description": "The average weight of the context.",
+ "type": "number"
+ },
+ "charts": {
+ "description": "A dictionary of weighted chart objects.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/weighted_chart"
+ }
+ }
+ }
+ },
+ "weighted_chart": {
+ "type": "object",
+ "properties": {
+ "weight": {
+ "description": "The average weight of the context.",
+ "type": "number"
+ },
+ "dimensions": {
+ "description": "A dictionary of weighted dimensions.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/components/schemas/weighted_dimension"
+ }
+ }
+ }
+ },
+ "weighted_dimension": {
+ "type": "number"
}
}
}
-}
+} \ No newline at end of file
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
index 96920375e..2e04e9f20 100644
--- a/web/api/netdata-swagger.yaml
+++ b/web/api/netdata-swagger.yaml
@@ -43,7 +43,7 @@ paths:
/chart:
get:
summary: Get info about a specific chart
- description: The Chart endpoint returns detailed information about a chart.
+ description: The chart endpoint returns detailed information about a chart.
parameters:
- name: chart
in: query
@@ -64,6 +64,159 @@ paths:
description: No chart id was supplied in the request.
"404":
description: No chart with the given id is found.
+ /contexts:
+ get:
+ summary: Get a list of all contexts available at the server
+ description: The contexts endpoint returns a summary about all contexts stored in the
+ netdata server.
+ parameters:
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - full
+ - all
+ - charts
+ - dimensions
+ - labels
+ - uuids
+ - queue
+ - flags
+ - deleted
+ - deepscan
+ default:
+ - full
+ - name: after
+ in: query
+ description: Limit the results to contexts having data after this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: before
+ in: query
+ description: Limit the results to contexts having data before this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: chart_label_key
+ in: query
+ description: A simple pattern matching chart label keys (use comma or pipe as separator)
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: chart_labels_filter
+ in: query
+ description: "a simple pattern matching charts label key and values (use colon for equality, comma or pipe
+ as separator)"
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: dimensions
+ in: query
+ description: A simple pattern matching dimensions (use comma or pipe as separator)
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: An array of contexts.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/context_summary"
+ /context:
+ get:
+ summary: Get info about a specific context
+ description: The context endpoint returns detailed information about a given context.
+ parameters:
+ - name: context
+ in: query
+ description: The id of the context as returned by the /contexts call.
+ required: true
+ schema:
+ type: string
+ format: as returned by /contexts
+ default: system.cpu
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - full
+ - all
+ - charts
+ - dimensions
+ - labels
+ - uuids
+ - queue
+ - flags
+ - deleted
+ - deepscan
+ default:
+ - full
+ - name: after
+ in: query
+ description: Limit the results to contexts having data after this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: before
+ in: query
+ description: Limit the results to contexts having data before this timestamp.
+ required: false
+ schema:
+ type: number
+ format: integer
+ - name: chart_label_key
+ in: query
+ description: A simple pattern matching chart label keys (use comma or pipe as separator)
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: chart_labels_filter
+ in: query
+ description: "a simple pattern matching charts label key and values (use colon for equality, comma or pipe
+ as separator)"
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ - name: dimensions
+ in: query
+ description: A simple pattern matching dimensions (use comma or pipe as separator)
+ required: false
+ allowEmptyValue: true
+ schema:
+ type: string
+ responses:
+ "200":
+ description: A JSON object with detailed information about the context.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/context"
+ "400":
+ description: No context id was supplied in the request.
+ "404":
+ description: No context with the given id is found.
/alarm_variables:
get:
summary: List variables available to configure alarms for a chart
@@ -208,7 +361,47 @@ paths:
- stddev
- sum
- incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
default: average
+ - name: group_options
+ in: query
+ description: When the group function supports additional parameters, this field
+ can be used to pass them to it. Currently only "countif" supports this.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
- name: gtime
in: query
description: The grouping number of seconds. This is used in conjunction with
@@ -280,6 +473,7 @@ paths:
- match-names
- showcustomvars
- allow_past
+ - anomaly-bit
default:
- seconds
- jsonwrap
@@ -409,6 +603,38 @@ paths:
- stddev
- sum
- incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
default: average
- name: options
in: query
@@ -427,6 +653,7 @@ paths:
- null2zero
- percentage
- unaligned
+ - anomaly-bit
default:
- absolute
- name: label
@@ -447,7 +674,9 @@ paths:
format: any text
- name: label_color
in: query
- description: A color to be used for the background of the label side(left side) of the badge. One of predefined colors or specific color in hex `RGB` or `RRGGBB` format (without preceding `#` character). If value wrong or not given default color will be used.
+ description: "A color to be used for the background of the label side(left side) of the badge.
+ One of predefined colors or specific color in hex `RGB` or `RRGGBB` format (without preceding `#` character).
+ If value wrong or not given default color will be used."
required: false
allowEmptyValue: true
schema:
@@ -472,7 +701,8 @@ paths:
description: "A color to be used for the background of the value *(right)* part of badge. You can set
multiple using a pipe with a condition each, like this:
`color<value|color:null` The following operators are
- supported: >, <, >=, <=, =, :null (to check if no value exists). Each color can be specified in same manner as for `label_color` parameter.
+ supported: >, <, >=, <=, =, :null (to check if no value exists).
+ Each color can be specified in the same manner as for the `label_color` parameter.
Currently only integers are supported as values."
required: false
allowEmptyValue: true
@@ -481,7 +711,9 @@ paths:
format: any text
- name: text_color_lbl
in: query
- description: Font color for label *(left)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.
+ description: "Font color for label *(left)* part of the badge. One of predefined colors or as HTML hexadecimal
+ color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default
+ color will be used."
required: false
allowEmptyValue: true
schema:
@@ -503,7 +735,9 @@ paths:
format: ^([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$
- name: text_color_val
in: query
- description: Font color for value *(right)* part of the badge. One of predefined colors or as HTML hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value given default color will be used.
+ description: "Font color for value *(right)* part of the badge. One of predefined colors or as HTML
+ hexadecimal color without preceding `#` character. Formats allowed `RGB` or `RRGGBB`. If no or wrong value
+ given default color will be used."
required: false
allowEmptyValue: true
schema:
@@ -551,7 +785,12 @@ paths:
format: integer
- name: fixed_width_lbl
in: query
- description: This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's left side *(label/name)*. You must set this parameter together with `fixed_width_val` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.
+ description: "This parameter overrides auto-sizing of badge and creates it with fixed width.
+ This parameter determines the size of the label's left side *(label/name)*.
+ You must set this parameter together with `fixed_width_val` otherwise it will be ignored.
+ You should set the label/value widths wide enough to provide space for all the possible values/contents of
+ the badge you're requesting. In case the text cannot fit the space given it will be clipped.
+ The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`."
required: false
allowEmptyValue: false
schema:
@@ -559,7 +798,12 @@ paths:
format: integer
- name: fixed_width_val
in: query
- description: This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter determines the size of the label's right side *(value)*. You must set this parameter together with `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to provide space for all the possible values/contents of the badge you're requesting. In case the text cannot fit the space given it will be clipped. The `scale` parameter still applies on the values you give to `fixed_width_lbl` and `fixed_width_val`.
+ description: "This parameter overrides auto-sizing of badge and creates it with fixed width. This parameter
+ determines the size of the label's right side *(value)*. You must set this parameter together with
+ `fixed_width_lbl` otherwise it will be ignored. You should set the label/value widths wide enough to
+ provide space for all the possible values/contents of the badge you're requesting. In case the text cannot
+ fit the space given it will be clipped. The `scale` parameter still applies on the values you give to
+ `fixed_width_lbl` and `fixed_width_val`."
required: false
allowEmptyValue: false
schema:
@@ -740,12 +984,12 @@ paths:
/alarms_values:
get:
summary: Get a list of active or raised alarms on the server
- description: The alarms_values endpoint returns the list of all raised or enabled alarms on
+ description: "The alarms_values endpoint returns the list of all raised or enabled alarms on
the netdata server. Called without any parameters, the raised alarms in
- state WARNING or CRITICAL are returned. By passing "?all", all the
+ state WARNING or CRITICAL are returned. By passing '?all', all the
enabled alarms are returned.
The output of this option differs from `/alarms` in the number of variables delivered. This endpoint gives
- to user `id`, `value`, `last_updated` time, and alarm `status`.
+ the user the `id`, `value`, `last_updated` time, and alarm `status`."
parameters:
- name: all
in: query
@@ -841,16 +1085,16 @@ paths:
memory.
/manage/health:
get:
- summary: Accesses the health management API to control health checks and
- notifications at runtime.
- description: Available from Netdata v1.12 and above, protected via bearer
+ summary: "Accesses the health management API to control health checks and
+ notifications at runtime."
+ description: "Available from Netdata v1.12 and above, protected via bearer
authorization. Especially useful for maintenance periods, the API allows
you to disable health checks completely, silence alarm notifications, or
disable/silence specific alarms that match selectors on alarm/template
name, chart, context, host and family. For the simple disable/silence
all scenarios, only the cmd parameter is required. The other parameters
are used to define alarm selectors. For more information and examples,
- refer to the netdata documentation.
+ refer to the netdata documentation."
parameters:
- name: cmd
in: query
@@ -904,8 +1148,8 @@ paths:
/aclk:
get:
summary: Get information about current ACLK state
- description: aclk endpoint returns detailed information
- about current state of ACLK (Agent to Cloud communication).
+ description: "ACLK endpoint returns detailed information
+ about current state of ACLK (Agent to Cloud communication)."
responses:
"200":
description: JSON object with ACLK information.
@@ -913,6 +1157,403 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/aclk_state"
+ /metric_correlations:
+ get:
+ summary: "Analyze all the metrics to find their correlations"
+ description: "THIS ENDPOINT IS OBSOLETE. Use the /weights endpoint.
+ Given two time windows (baseline, highlight), it goes
+ through all the available metrics, querying both windows, and tries to find
+ how the two windows relate to each other. It supports
+ multiple algorithms to do so. The result is a list of all
+ metrics evaluated, weighted from 0.0 (the two windows are
+ very different) to 1.0 (the two windows are similar).
+ The algorithm automatically adjusts the baseline window to be
+ a power-of-two multiple of the highlighted one (1, 2, 4, 8, etc.)."
+ parameters:
+ - name: baseline_after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the baseline window, or a relative number of
+ seconds (negative, relative to parameter baseline_before). Netdata will
+ assume it is a relative number if it is less than 3 years (in seconds).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -300
+ - name: baseline_before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the baseline window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the highlighted window, or a relative number of
+ seconds (negative, relative to parameter before). Netdata will
+ assume it is a relative number if it is less than 3 years (in seconds).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the highlighted window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: points
+ in: query
+ description: The number of points to be evaluated for the highlighted window.
+ The baseline window will be adjusted automatically to receive a proportional
+ amount of points.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 500
+ - name: method
+ in: query
+ description: The algorithm to run.
+ required: false
+ schema:
+ type: string
+ enum:
+ - ks2
+ - volume
+ default: ks2
+ - name: timeout
+ in: query
+ description: Cancel the query if it takes more than this amount of milliseconds.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 60000
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - min2max
+ - abs
+ - absolute
+ - absolute-sum
+ - null2zero
+ - percentage
+ - unaligned
+ - allow_past
+ - nonzero
+ - anomaly-bit
+ - raw
+ default:
+ - null2zero
+ - allow_past
+ - nonzero
+ - unaligned
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+ in order to return fewer points, this parameter defines the method
+ of grouping. Supported methods include "min", "max", "average", "sum" and
+ "incremental-sum". "max" is actually calculated on the absolute
+ value collected (so it works for both positive and negative
+ dimensions to return the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ - name: group_options
+ in: query
+ description: When the group function supports additional parameters, this field
+ can be used to pass them to it. Currently only "countif" supports this.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ responses:
+ "200":
+ description: JSON object with weights for each chart and dimension.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/metric_correlations"
+ "400":
+ description: The given parameters are invalid.
+ "403":
+ description: Metric correlations are not enabled on this Netdata Agent.
+ "404":
+ description: No charts could be found, or the method
+ that correlated the metrics did not produce any result.
+ "504":
+ description: Timeout - the query took too long and has been cancelled.
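+ # Illustrative example (not part of the spec): correlate the last minute of data
+ # against the four minutes that preceded it, using the default ks2 method:
+ #   /api/v1/metric_correlations?baseline_after=-300&baseline_before=-60&after=-60&before=0&group=average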
+ /weights:
+ get:
+ summary: "Analyze all the metrics using an algorithm and score them accordingly"
+ description: "This endpoint goes through all metrics and scores them according to an algorithm."
+ parameters:
+ - name: baseline_after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the baseline window, or a relative number of
+ seconds (negative, relative to parameter baseline_before). Netdata will
+ assume it is a relative number if it is less than 3 years (in seconds).
+ This parameter is used in KS2 and VOLUME algorithms.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -300
+ - name: baseline_before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the baseline window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ This parameter is used in KS2 and VOLUME algorithms.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: after
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ starting point of the highlighted window, or a relative number of
+ seconds (negative, relative to parameter before). Netdata will
+ assume it is a relative number if it is less than 3 years (in seconds).
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: -60
+ - name: before
+ in: query
+ description: This parameter can either be an absolute timestamp specifying the
+ ending point of the highlighted window, or a relative number of
+ seconds (negative), relative to the last collected timestamp.
+ Netdata will assume it is a relative number if it is less than 3
+ years (in seconds).
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 0
+ - name: context
+ in: query
+ description: A simple pattern matching the contexts to evaluate.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ - name: points
+ in: query
+ description: The number of points to be evaluated for the highlighted window.
+ The baseline window will be adjusted automatically to receive a proportional
+ amount of points.
+ This parameter is only used by the KS2 algorithm.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 500
+ - name: method
+ in: query
+ description: The algorithm to run.
+ required: false
+ schema:
+ type: string
+ enum:
+ - ks2
+ - volume
+ - anomaly-rate
+ default: anomaly-rate
+ - name: tier
+ in: query
+ description: Use the specified database tier
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ - name: timeout
+ in: query
+ description: Cancel the query if it takes more than this amount of milliseconds.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: number
+ format: integer
+ default: 60000
+ - name: options
+ in: query
+ description: Options that affect data generation.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: array
+ items:
+ type: string
+ enum:
+ - min2max
+ - abs
+ - absolute
+ - absolute-sum
+ - null2zero
+ - percentage
+ - unaligned
+ - nonzero
+ - anomaly-bit
+ - raw
+ default:
+ - null2zero
+ - nonzero
+ - unaligned
+ - name: group
+ in: query
+ description: The grouping method. If multiple collected values are to be grouped
+ in order to return fewer points, this parameter defines the method
+ of grouping. Supported methods include "min", "max", "average", "sum" and
+ "incremental-sum". "max" is actually calculated on the absolute
+ value collected (so it works for both positive and negative
+ dimensions to return the most extreme value in either direction).
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ enum:
+ - min
+ - max
+ - average
+ - median
+ - stddev
+ - sum
+ - incremental-sum
+ - ses
+ - des
+ - cv
+ - countif
+ - percentile
+ - percentile25
+ - percentile50
+ - percentile75
+ - percentile80
+ - percentile90
+ - percentile95
+ - percentile97
+ - percentile98
+ - percentile99
+ - trimmed-mean
+ - trimmed-mean1
+ - trimmed-mean2
+ - trimmed-mean3
+ - trimmed-mean5
+ - trimmed-mean10
+ - trimmed-mean15
+ - trimmed-mean20
+ - trimmed-mean25
+ - trimmed-median
+ - trimmed-median1
+ - trimmed-median2
+ - trimmed-median3
+ - trimmed-median5
+ - trimmed-median10
+ - trimmed-median15
+ - trimmed-median20
+ - trimmed-median25
+ default: average
+ - name: group_options
+ in: query
+ description: When the group function supports additional parameters, this field
+ can be used to pass them to it. Currently only "countif" supports this.
+ required: false
+ allowEmptyValue: false
+ schema:
+ type: string
+ responses:
+ "200":
+ description: JSON object with weights for each context, chart and dimension.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/weights"
+ "400":
+ description: The given parameters are invalid.
+ "403":
+ description: Metric correlations are not enabled on this Netdata Agent.
+ "404":
+ description: No charts could be found, or the method
+ that correlated the metrics did not produce any result.
+ "504":
+ description: Timeout - the query took too long and has been cancelled.
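+ # Illustrative example (not part of the spec): score all metrics for the last
+ # minute by anomaly rate (the default method):
+ #   /api/v1/weights?after=-60&before=0&method=anomaly-rate&group=average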
servers:
- url: https://registry.my-netdata.io/api/v1
- url: http://registry.my-netdata.io/api/v1
@@ -1198,6 +1839,75 @@ components:
type: number
nullable: true
description: Chart health red threshold.
+ context_summary:
+ type: object
+ properties:
+ hostname:
+ type: string
+ description: The hostname of the netdata server.
+ machine_guid:
+ type: string
+ description: The unique installation id of this netdata server.
+ node_id:
+ type: string
+ description: The unique node id of this netdata server at the hub.
+ example: nightly
+ claim_id:
+ type: string
+ description: The unique handshake id of this netdata server and the hub.
+ host_labels:
+ type: object
+ description: The host labels associated with this netdata server.
+ context:
+ type: object
+ description: "An object containing all the context objects available at the netdata server.
+ This is used as an indexed array. The key of each context object is the id of the context."
+ additionalProperties:
+ $ref: "#/components/schemas/context"
+ context:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "The version of this context.
+ The numbers are not sequential, but bigger numbers denote a newer object."
+ hub_version:
+ type: string
+ description: The version of this context, as known by hub.
+ family:
+ type: string
+ description: "The family of the context. When multiple charts of a context have different families,
+ the netdata server replaces the different parts with [x], so that the context can have only one family."
+ title:
+ type: string
+ description: "The title of the context. When multiple charts of a context have different titles,
+ the netdata server replaces the different parts with [x], so that the context can have only one title."
+ priority:
+ type: number
+ description: "The relative priority of the context. When multiple contexts have different priorities,
+ the minimum among them is selected as the priority of the context."
+ units:
+ type: string
+ description: "The unit of measurement for the values of all dimensions of the context. If multiple charts
+ of the context have different units, the latest collected is selected."
+ chart_type:
+ type: string
+ description: The chart type.
+ enum:
+ - line
+ - area
+ - stacked
+ first_time_t:
+ type: number
+ description: The UNIX timestamp of the first entry (the oldest) in the database.
+ last_time_t:
+ type: number
+ description: The UNIX timestamp of the latest entry in the database.
+ charts:
+ type: object
+ description: "An object containing all the charts available for the chart. This is used as an indexed array.
+ For each pair in the dictionary, the key is the id of the chart and the value provides all details about
+ the chart."
alarm_variables:
type: object
properties:
@@ -1669,8 +2379,9 @@ components:
properties:
aclk-available:
type: string
- description: Describes whether this agent is capable of connection to the Cloud.
- False means agent has been built without ACLK component either on purpose (user choice) or due to missing dependency.
+ description: "Describes whether this agent is capable of connection to the Cloud.
+ False means agent has been built without ACLK component either on purpose (user choice)
+ or due to missing dependency."
aclk-version:
type: integer
description: Describes which ACLK version is currently used.
@@ -1696,3 +2407,168 @@ components:
enum:
- Old
- New
+ metric_correlations:
+ type: object
+ properties:
+ after:
+ description: the start time of the highlighted window
+ type: integer
+ before:
+ description: the end time of the highlighted window
+ type: integer
+ duration:
+ description: the duration of the highlighted window
+ type: integer
+ points:
+ description: the points of the highlighted window
+ type: integer
+ baseline_after:
+ description: the start time of the baseline window
+ type: integer
+ baseline_before:
+ description: the end time of the baseline window
+ type: integer
+ baseline_duration:
+ description: the duration of the baseline window
+ type: integer
+ baseline_points:
+ description: the points of the baseline window
+ type: integer
+ group:
+ description: the grouping method across time
+ type: string
+ method:
+ description: the correlation method used
+ type: string
+ options:
+ description: a comma separated list of the query options set
+ type: string
+ correlated_dimensions:
+ description: the number of dimensions returned in the result
+ total_dimensions_count:
+ description: the total number of dimensions evaluated
+ type: integer
+ statistics:
+ type: object
+ properties:
+ query_time_ms:
+ type: number
+ db_queries:
+ type: integer
+ db_points_read:
+ type: integer
+ query_result_points:
+ type: integer
+ binary_searches:
+ type: integer
+ correlated_charts:
+ type: object
+ description: An object containing chart objects with their metrics correlations.
+ properties:
+ chart-id1:
+ type: object
+ properties:
+ context:
+ type: string
+ dimensions:
+ type: object
+ properties:
+ dimension1-name:
+ type: number
+ dimension2-name:
+ type: number
+ chart-id2:
+ type: object
+ properties:
+ context:
+ type: string
+ dimensions:
+ type: object
+ properties:
+ dimension1-name:
+ type: number
+ dimension2-name:
+ type: number
+ weights:
+ type: object
+ properties:
+ after:
+ description: the start time of the highlighted window
+ type: integer
+ before:
+ description: the end time of the highlighted window
+ type: integer
+ duration:
+ description: the duration of the highlighted window
+ type: integer
+ points:
+ description: the points of the highlighted window
+ type: integer
+ baseline_after:
+ description: the start time of the baseline window
+ type: integer
+ baseline_before:
+ description: the end time of the baseline window
+ type: integer
+ baseline_duration:
+ description: the duration of the baseline window
+ type: integer
+ baseline_points:
+ description: the points of the baseline window
+ type: integer
+ group:
+ description: the grouping method across time
+ type: string
+ method:
+ description: the correlation method used
+ type: string
+ options:
+ description: a comma separated list of the query options set
+ type: string
+ correlated_dimensions:
+ description: the number of dimensions returned in the result
+ total_dimensions_count:
+ description: the total number of dimensions evaluated
+ type: integer
+ statistics:
+ type: object
+ properties:
+ query_time_ms:
+ type: number
+ db_queries:
+ type: integer
+ db_points_read:
+ type: integer
+ query_result_points:
+ type: integer
+ binary_searches:
+ type: integer
+ contexts:
+ description: A dictionary of weighted context objects.
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/weighted_context'
+ weighted_context:
+ type: object
+ properties:
+ weight:
+ description: The average weight of the context.
+ type: number
+ charts:
+ description: A dictionary of weighted chart objects.
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/weighted_chart'
+ weighted_chart:
+ type: object
+ properties:
+ weight:
+ description: The average weight of the chart.
+ type: number
+ dimensions:
+ description: A dictionary of weighted dimensions.
+ type: object
+ additionalProperties:
+ $ref: '#/components/schemas/weighted_dimension'
+ weighted_dimension:
+ type: number
diff --git a/web/api/queries/Makefile.am b/web/api/queries/Makefile.am
index 34bfdb85b..7c4c43520 100644
--- a/web/api/queries/Makefile.am
+++ b/web/api/queries/Makefile.am
@@ -5,14 +5,17 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
SUBDIRS = \
average \
+ countif \
des \
incremental_sum \
max \
min \
sum \
median \
+ percentile \
ses \
stddev \
+ trimmed_mean \
$(NULL)
dist_noinst_DATA = \
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
index 2ed33da50..0719d57fa 100644
--- a/web/api/queries/average/average.c
+++ b/web/api/queries/average/average.c
@@ -6,12 +6,12 @@
// average
struct grouping_average {
- calculated_number sum;
+ NETDATA_DOUBLE sum;
size_t count;
};
-void grouping_create_average(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_average));
+void grouping_create_average(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
}
// resets when switches dimensions
@@ -23,20 +23,20 @@ void grouping_reset_average(RRDR *r) {
}
void grouping_free_average(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_average(RRDR *r, calculated_number value) {
+void grouping_add_average(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
g->sum += value;
g->count++;
}
-calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h
index 23ecfac6f..55c51722c 100644
--- a/web/api/queries/average/average.h
+++ b/web/api/queries/average/average.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_average(RRDR *r);
+extern void grouping_create_average(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_average(RRDR *r);
extern void grouping_free_average(RRDR *r);
-extern void grouping_add_average(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_average(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_AVERAGE_H
diff --git a/web/api/queries/countif/Makefile.am b/web/api/queries/countif/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/web/api/queries/countif/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/countif/README.md b/web/api/queries/countif/README.md
new file mode 100644
index 000000000..200a4c9ed
--- /dev/null
+++ b/web/api/queries/countif/README.md
@@ -0,0 +1,36 @@
+<!--
+title: "CountIf"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/countif/README.md
+-->
+
+# CountIf
+
+> This query is available as `countif`.
+
+CountIf returns the percentage of points in the database that satisfy the condition supplied.
+
+The following conditions are available:
+
+- `!` or `!=` or `<>`, not equal to
+- `=` or `:`, equal to
+- `>`, greater than
+- `<`, less than
+- `>=`, greater than or equal to
+- `<=`, less than or equal to
+
+The target number and the desired condition can be set using the `group_options` query parameter, as a string, like in these examples:
+
+- `!0`, to match any number except zero.
+- `>=-3`, to match any number greater than or equal to -3.
+
+When an invalid condition is given, the web server may deliver an inaccurate response.
+
+## How to use
+
+This query cannot be used in alarms.
+
+`countif` changes the units of charts. The result of the calculation is always from zero to 100, expressing the percentage of database points that matched the condition.
+
+In APIs and badges, it can be used like this: `&group=countif&group_options=>10` in the URL.
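+
+For example, to query a hypothetical chart `system.cpu` and get, for each point
+returned, the percentage of collected values that were above 10:
+
+```
+/api/v1/data?chart=system.cpu&group=countif&group_options=>10
+```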
+
+
diff --git a/web/api/queries/countif/countif.c b/web/api/queries/countif/countif.c
new file mode 100644
index 000000000..369b20be9
--- /dev/null
+++ b/web/api/queries/countif/countif.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "countif.h"
+
+// ----------------------------------------------------------------------------
+// countif
+
+struct grouping_countif {
+ size_t (*comparison)(NETDATA_DOUBLE, NETDATA_DOUBLE);
+ NETDATA_DOUBLE target;
+ size_t count;
+ size_t matched;
+};
+
+static size_t countif_equal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v == target);
+}
+
+static size_t countif_notequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v != target);
+}
+
+static size_t countif_less(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v < target);
+}
+
+static size_t countif_lessequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v <= target);
+}
+
+static size_t countif_greater(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v > target);
+}
+
+static size_t countif_greaterequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v >= target);
+}
+
+void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_countif));
+ r->internal.grouping_data = g;
+
+ if(options && *options) {
+ // skip any leading spaces
+ while(isspace(*options)) options++;
+
+ // find the comparison function
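+ // e.g. (as in the README above) "!0" selects countif_notequal with target 0,
+ // and ">=-3" selects countif_greaterequal with target -3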
+ switch(*options) {
+ case '!':
+ options++;
+ if(*options != '=' && *options != ':')
+ options--;
+ g->comparison = countif_notequal;
+ break;
+
+ case '>':
+ options++;
+ if(*options == '=' || *options == ':') {
+ g->comparison = countif_greaterequal;
+ }
+ else {
+ options--;
+ g->comparison = countif_greater;
+ }
+ break;
+
+ case '<':
+ options++;
+ if(*options == '>') {
+ g->comparison = countif_notequal;
+ }
+ else if(*options == '=' || *options == ':') {
+ g->comparison = countif_lessequal;
+ }
+ else {
+ options--;
+ g->comparison = countif_less;
+ }
+ break;
+
+ default:
+ case '=':
+ case ':':
+ g->comparison = countif_equal;
+ break;
+ }
+ if(*options) options++;
+
+ // skip any spaces before the target number
+ while(isspace(*options)) options++;
+
+ g->target = str2ndd(options, NULL);
+ }
+ else {
+ g->target = 0.0;
+ g->comparison = countif_equal;
+ }
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_countif(RRDR *r) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+ g->matched = 0;
+ g->count = 0;
+}
+
+void grouping_free_countif(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+ g->matched += g->comparison(value, g->target);
+ g->count++;
+}
+
+NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
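+ // the result is the percentage (0 to 100) of points that matched the condition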
+ value = (NETDATA_DOUBLE)g->matched * 100 / (NETDATA_DOUBLE)g->count;
+ }
+
+ g->matched = 0;
+ g->count = 0;
+
+ return value;
+}
diff --git a/web/api/queries/countif/countif.h b/web/api/queries/countif/countif.h
new file mode 100644
index 000000000..0c7d2d7d1
--- /dev/null
+++ b/web/api/queries/countif/countif.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_COUNTIF_H
+#define NETDATA_API_QUERY_COUNTIF_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_create_countif(RRDR *r, const char *options __maybe_unused);
+extern void grouping_reset_countif(RRDR *r);
+extern void grouping_free_countif(RRDR *r);
+extern void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_COUNTIF_H
diff --git a/web/api/queries/des/des.c b/web/api/queries/des/des.c
index 8e4ca4bd4..a6c4e4051 100644
--- a/web/api/queries/des/des.c
+++ b/web/api/queries/des/des.c
@@ -8,13 +8,13 @@
// double exponential smoothing
struct grouping_des {
- calculated_number alpha;
- calculated_number alpha_other;
- calculated_number beta;
- calculated_number beta_other;
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE beta;
+ NETDATA_DOUBLE beta_other;
- calculated_number level;
- calculated_number trend;
+ NETDATA_DOUBLE level;
+ NETDATA_DOUBLE trend;
size_t count;
};
@@ -31,22 +31,22 @@ void grouping_init_des(void) {
}
}
-static inline calculated_number window(RRDR *r, struct grouping_des *g) {
+static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_des *g) {
(void)g;
- calculated_number points;
+ NETDATA_DOUBLE points;
if(r->group == 1) {
// provide a running DES
- points = r->internal.points_wanted;
+ points = (NETDATA_DOUBLE)r->internal.points_wanted;
}
else {
        // provide a DES with flush points
- points = r->group;
+ points = (NETDATA_DOUBLE)r->group;
}
// https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
// A commonly used value for alpha is 2 / (N + 1)
- return (points > max_window_size) ? max_window_size : points;
+ return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
}
static inline void set_alpha(RRDR *r, struct grouping_des *g) {
@@ -69,8 +69,8 @@ static inline void set_beta(RRDR *r, struct grouping_des *g) {
//info("beta for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->beta);
}
-void grouping_create_des(RRDR *r) {
- struct grouping_des *g = (struct grouping_des *)mallocz(sizeof(struct grouping_des));
+void grouping_create_des(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_des *g = (struct grouping_des *)onewayalloc_mallocz(r->internal.owa, sizeof(struct grouping_des));
set_alpha(r, g);
set_beta(r, g);
g->level = 0.0;
@@ -92,11 +92,11 @@ void grouping_reset_des(RRDR *r) {
}
void grouping_free_des(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_des(RRDR *r, calculated_number value) {
+void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
if(likely(g->count > 0)) {
@@ -109,7 +109,7 @@ void grouping_add_des(RRDR *r, calculated_number value) {
}
// for the values, except the first
- calculated_number last_level = g->level;
+ NETDATA_DOUBLE last_level = g->level;
g->level = (g->alpha * value) + (g->alpha_other * (g->level + g->trend));
g->trend = (g->beta * (g->level - last_level)) + (g->beta_other * g->trend);
}
@@ -123,10 +123,10 @@ void grouping_add_des(RRDR *r, calculated_number value) {
//fprintf(stderr, "value: " CALCULATED_NUMBER_FORMAT ", level: " CALCULATED_NUMBER_FORMAT ", trend: " CALCULATED_NUMBER_FORMAT "\n", value, g->level, g->trend);
}
-calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
- if(unlikely(!g->count || !calculated_number_isnumber(g->level))) {
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
return 0.0;
}
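
`des` keeps two exponentially smoothed state variables, a level and a trend, updated with the
Holt double-exponential-smoothing recurrences of `grouping_add_des()`, and both smoothing
factors derive from the window size as `2 / (N + 1)`. A self-contained sketch of the recurrence
(illustrative only; the initialization of level and trend here is an assumption):

```
#include <stdio.h>

int main(void) {
    const double data[] = { 10, 12, 11, 13, 15, 14 };
    const int n = 6;

    /* alpha = beta = 2 / (N + 1), as in the comment above */
    const double alpha = 2.0 / (n + 1.0);
    const double beta  = alpha;

    /* assumed initialization: first value and first difference */
    double level = data[0];
    double trend = data[1] - data[0];

    for (int i = 1; i < n; i++) {
        double last_level = level;
        level = alpha * data[i] + (1.0 - alpha) * (level + trend);
        trend = beta * (level - last_level) + (1.0 - beta) * trend;
        printf("point %d: level %.3f, trend %.3f\n", i, level, trend);
    }
    return 0;
}
```
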
diff --git a/web/api/queries/des/des.h b/web/api/queries/des/des.h
index bd361b865..8906d14eb 100644
--- a/web/api/queries/des/des.h
+++ b/web/api/queries/des/des.h
@@ -8,10 +8,10 @@
extern void grouping_init_des(void);
-extern void grouping_create_des(RRDR *r);
+extern void grouping_create_des(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_des(RRDR *r);
extern void grouping_free_des(RRDR *r);
-extern void grouping_add_des(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_des(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_DES_H
diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/web/api/queries/incremental_sum/incremental_sum.c
index 304d9aa74..afca530c3 100644
--- a/web/api/queries/incremental_sum/incremental_sum.c
+++ b/web/api/queries/incremental_sum/incremental_sum.c
@@ -6,13 +6,13 @@
// incremental sum
struct grouping_incremental_sum {
- calculated_number first;
- calculated_number last;
+ NETDATA_DOUBLE first;
+ NETDATA_DOUBLE last;
size_t count;
};
-void grouping_create_incremental_sum(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_incremental_sum));
+void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
}
// resets when the query engine switches dimensions
@@ -25,11 +25,11 @@ void grouping_reset_incremental_sum(RRDR *r) {
}
void grouping_free_incremental_sum(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_incremental_sum(RRDR *r, calculated_number value) {
+void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
if(unlikely(!g->count)) {
@@ -42,10 +42,10 @@ void grouping_add_incremental_sum(RRDR *r, calculated_number value) {
}
}
-calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/web/api/queries/incremental_sum/incremental_sum.h
index 5b55ad3c8..6d908cef6 100644
--- a/web/api/queries/incremental_sum/incremental_sum.h
+++ b/web/api/queries/incremental_sum/incremental_sum.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_incremental_sum(RRDR *r);
+extern void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_incremental_sum(RRDR *r);
extern void grouping_free_incremental_sum(RRDR *r);
-extern void grouping_add_incremental_sum(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H
diff --git a/web/api/queries/max/max.c b/web/api/queries/max/max.c
index b6e723314..73cf9fa66 100644
--- a/web/api/queries/max/max.c
+++ b/web/api/queries/max/max.c
@@ -6,12 +6,12 @@
// max
struct grouping_max {
- calculated_number max;
+ NETDATA_DOUBLE max;
size_t count;
};
-void grouping_create_max(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_max));
+void grouping_create_max(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
}
// resets when the query engine switches dimensions
@@ -23,23 +23,23 @@ void grouping_reset_max(RRDR *r) {
}
void grouping_free_max(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_max(RRDR *r, calculated_number value) {
+void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
- if(!g->count || calculated_number_fabs(value) > calculated_number_fabs(g->max)) {
+ if(!g->count || fabsndd(value) > fabsndd(g->max)) {
g->max = value;
g->count++;
}
}
-calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/max/max.h b/web/api/queries/max/max.h
index 7b606ce34..28913686b 100644
--- a/web/api/queries/max/max.h
+++ b/web/api/queries/max/max.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_max(RRDR *r);
+extern void grouping_create_max(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_max(RRDR *r);
extern void grouping_free_max(RRDR *r);
-extern void grouping_add_max(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_max(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_MAX_H
diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md
index bb7d4c66b..5600284c2 100644
--- a/web/api/queries/median/README.md
+++ b/web/api/queries/median/README.md
@@ -13,6 +13,20 @@ The median is the value separating the higher half from the lower half of a data
`median` is not an accurate average. However, it eliminates all spikes, by sorting
all the values in a period, and selecting the value in the middle of the sorted array.
+Netdata also supports `trimmed-median`, which trims a percentage of the smallest and biggest values prior to
+finding the median. The following `trimmed-median` functions are defined:
+
+- `trimmed-median1`
+- `trimmed-median2`
+- `trimmed-median3`
+- `trimmed-median5`
+- `trimmed-median10`
+- `trimmed-median15`
+- `trimmed-median20`
+- `trimmed-median25`
+
+The function `trimmed-median` is an alias for `trimmed-median5`.
+
## how to use
Use it in alarms like this:
@@ -27,7 +41,8 @@ lookup: median -1m unaligned of my_dimension
`median` does not change the units. For example, if the chart's units are `requests/sec`, the result
will again be expressed in the same units.
-It can also be used in APIs and badges as `&group=median` in the URL.
+It can also be used in APIs and badges as `&group=median` in the URL. Additionally, a percentage may be given with
+`&group_options=` to trim all small and big values before finding the median.
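+
+For example, this (illustrative) data query trims 10% of the small and big values before
+computing the median:
+
+```
+/api/v1/data?chart=system.cpu&after=-60&group=trimmed-median&group_options=10
+```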
## Examples
diff --git a/web/api/queries/median/median.c b/web/api/queries/median/median.c
index bffcee12f..40fd4ec3a 100644
--- a/web/api/queries/median/median.c
+++ b/web/api/queries/median/median.c
@@ -2,27 +2,65 @@
#include "median.h"
-
// ----------------------------------------------------------------------------
// median
struct grouping_median {
size_t series_size;
size_t next_pos;
+ NETDATA_DOUBLE percent;
- LONG_DOUBLE series[];
+ NETDATA_DOUBLE *series;
};
-void grouping_create_median(RRDR *r) {
+void grouping_create_median_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
long entries = r->group;
- if(entries < 0) entries = 0;
+ if(entries < 10) entries = 10;
- struct grouping_median *g = (struct grouping_median *)callocz(1, sizeof(struct grouping_median) + entries * sizeof(LONG_DOUBLE));
+ struct grouping_median *g = (struct grouping_median *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_median));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
g->series_size = (size_t)entries;
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
+ g->percent = g->percent / 100.0;
r->internal.grouping_data = g;
}
+void grouping_create_median(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 0.0);
+}
+void grouping_create_trimmed_median1(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 1.0);
+}
+void grouping_create_trimmed_median2(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 2.0);
+}
+void grouping_create_trimmed_median3(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 3.0);
+}
+void grouping_create_trimmed_median5(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 5.0);
+}
+void grouping_create_trimmed_median10(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 10.0);
+}
+void grouping_create_trimmed_median15(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 15.0);
+}
+void grouping_create_trimmed_median20(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 20.0);
+}
+void grouping_create_trimmed_median25(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 25.0);
+}
+
// resets when the query engine switches dimensions
// so, clear everything to restart
void grouping_reset_median(RRDR *r) {
@@ -31,47 +69,72 @@ void grouping_reset_median(RRDR *r) {
}
void grouping_free_median(RRDR *r) {
- freez(r->internal.grouping_data);
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_median(RRDR *r, calculated_number value) {
+void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
if(unlikely(g->next_pos >= g->series_size)) {
- error("INTERNAL ERROR: median buffer overflow on chart '%s' - next_pos = %zu, series_size = %zu, r->group = %ld.", r->st->name, g->next_pos, g->series_size, r->group);
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
}
- else
- g->series[g->next_pos++] = (LONG_DOUBLE)value;
+
+ g->series[g->next_pos++] = value;
}
-calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
- calculated_number value;
+ size_t available_slots = g->next_pos;
+ NETDATA_DOUBLE value;
- if(unlikely(!g->next_pos)) {
+ if(unlikely(!available_slots)) {
value = 0.0;
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
else {
- if(g->next_pos > 1) {
- sort_series(g->series, g->next_pos);
- value = (calculated_number)median_on_sorted_series(g->series, g->next_pos);
- }
- else
- value = (calculated_number)g->series[0];
+ sort_series(g->series, available_slots);
+
+ size_t start_slot = 0;
+ size_t end_slot = available_slots - 1;
+
+ if(g->percent > 0.0) {
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+ NETDATA_DOUBLE delta = (max - min) * g->percent;
+
+ NETDATA_DOUBLE wanted_min = min + delta;
+ NETDATA_DOUBLE wanted_max = max - delta;
+
+ for (start_slot = 0; start_slot < available_slots; start_slot++)
+ if (g->series[start_slot] >= wanted_min) break;
- if(!calculated_number_isnumber(value)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ for (end_slot = available_slots - 1; end_slot > start_slot; end_slot--)
+ if (g->series[end_slot] <= wanted_max) break;
}
- //log_series_to_stderr(g->series, g->next_pos, value, "median");
+ if(start_slot == end_slot)
+ value = g->series[start_slot];
+ else
+ value = median_on_sorted_series(&g->series[start_slot], end_slot - start_slot + 1);
}
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "median");
+
g->next_pos = 0;
return value;
}
-
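
Note that the flush above trims by value range rather than by count: values lying within
`percent` of the min-max span from either extreme are dropped, and the median is taken over
what remains. An illustrative standalone sketch (the even-count median below uses the textbook
mid-pair average, which is an assumption about `median_on_sorted_series()`):

```
#include <stdio.h>
#include <stdlib.h>

static int cmp(const void *a, const void *b) {
    double x = *(const double *)a, y = *(const double *)b;
    return (x > y) - (x < y);
}

int main(void) {
    double s[] = { 100, 1, 2, 3, 4, 5, 6, 7, 8, -50 };
    size_t n = sizeof(s) / sizeof(s[0]);
    double percent = 0.10;                       /* trimmed-median10 */

    qsort(s, n, sizeof(double), cmp);

    double delta = (s[n - 1] - s[0]) * percent;  /* 15.0 for the span -50..100 */
    double lo = s[0] + delta, hi = s[n - 1] - delta;

    size_t start = 0, end = n - 1;
    while (start < n && s[start] < lo) start++;  /* drops -50 */
    while (end > start && s[end] > hi) end--;    /* drops 100 */

    size_t m = end - start + 1;                  /* 8 values remain */
    double median = (m % 2) ? s[start + m / 2]
                            : (s[start + m / 2 - 1] + s[start + m / 2]) / 2.0;
    printf("trimmed median: %.2f\n", median);    /* values 1..8 -> 4.50 */
    return 0;
}
```
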
diff --git a/web/api/queries/median/median.h b/web/api/queries/median/median.h
index 28d52b31e..dd1b3de61 100644
--- a/web/api/queries/median/median.h
+++ b/web/api/queries/median/median.h
@@ -6,10 +6,18 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_median(RRDR *r);
+extern void grouping_create_median(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median1(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median2(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median3(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median5(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median10(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median15(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median20(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median25(RRDR *r, const char *options);
extern void grouping_reset_median(RRDR *r);
extern void grouping_free_median(RRDR *r);
-extern void grouping_add_median(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_median(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_MEDIAN_H
diff --git a/web/api/queries/min/min.c b/web/api/queries/min/min.c
index 497bae04d..1752e9e0c 100644
--- a/web/api/queries/min/min.c
+++ b/web/api/queries/min/min.c
@@ -6,12 +6,12 @@
// min
struct grouping_min {
- calculated_number min;
+ NETDATA_DOUBLE min;
size_t count;
};
-void grouping_create_min(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_min));
+void grouping_create_min(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
}
// resets when the query engine switches dimensions
@@ -23,23 +23,23 @@ void grouping_reset_min(RRDR *r) {
}
void grouping_free_min(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_min(RRDR *r, calculated_number value) {
+void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
- if(!g->count || calculated_number_fabs(value) < calculated_number_fabs(g->min)) {
+ if(!g->count || fabsndd(value) < fabsndd(g->min)) {
g->min = value;
g->count++;
}
}
-calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/min/min.h b/web/api/queries/min/min.h
index 9207c74f7..b8627f667 100644
--- a/web/api/queries/min/min.h
+++ b/web/api/queries/min/min.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_min(RRDR *r);
+extern void grouping_create_min(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_min(RRDR *r);
extern void grouping_free_min(RRDR *r);
-extern void grouping_add_min(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_min(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_MIN_H
diff --git a/web/api/queries/percentile/Makefile.am b/web/api/queries/percentile/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/web/api/queries/percentile/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/percentile/README.md b/web/api/queries/percentile/README.md
new file mode 100644
index 000000000..70afc7420
--- /dev/null
+++ b/web/api/queries/percentile/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Percentile"
+description: "Use percentile in API queries and health entities to find the 'percentile' value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/percentile/README.md
+-->
+
+# Percentile
+
+The percentile is the average value of a series, computed using only the smallest N percent of the sorted
+values (of a population or a probability distribution).
+
+Netdata applies linear interpolation on the last point if the requested percentile does not correspond to a
+round number of points.
+
+The following percentile aliases are defined:
+
+- `percentile25`
+- `percentile50`
+- `percentile75`
+- `percentile80`
+- `percentile90`
+- `percentile95`
+- `percentile97`
+- `percentile98`
+- `percentile99`
+
+The default `percentile` is an alias for `percentile95`.
+Any percentile may be requested using the `group_options` query parameter.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: percentile95 -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`percentile` does not change the units. For example, if the chart's units are `requests/sec`, the result
+will again be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=percentile` in the URL and the additional parameter `group_options`
+may be used to request any percentile (e.g. `&group=percentile&group_options=96`).
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=percentile95&after=-60&label=percentile95&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Percentile>.
diff --git a/web/api/queries/percentile/percentile.c b/web/api/queries/percentile/percentile.c
new file mode 100644
index 000000000..88f8600dd
--- /dev/null
+++ b/web/api/queries/percentile/percentile.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "percentile.h"
+
+// ----------------------------------------------------------------------------
+// percentile
+
+struct grouping_percentile {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static void grouping_create_percentile_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->group;
+ if(entries < 10) entries = 10;
+
+ struct grouping_percentile *g = (struct grouping_percentile *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_percentile));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 100.0) g->percent = 100.0;
+ }
+
+ g->percent = g->percent / 100.0;
+ r->internal.grouping_data = g;
+}
+
+void grouping_create_percentile25(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 25.0);
+}
+void grouping_create_percentile50(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 50.0);
+}
+void grouping_create_percentile75(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 75.0);
+}
+void grouping_create_percentile80(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 80.0);
+}
+void grouping_create_percentile90(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 90.0);
+}
+void grouping_create_percentile95(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 95.0);
+}
+void grouping_create_percentile97(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 97.0);
+}
+void grouping_create_percentile98(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 98.0);
+}
+void grouping_create_percentile99(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 99.0);
+}
+
+// resets when the query engine switches dimensions
+// so, clear everything to restart
+void grouping_reset_percentile(RRDR *r) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_percentile(RRDR *r) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = 0;
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1;
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "percentile");
+
+ g->next_pos = 0;
+
+ return value;
+}
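
The flush above averages the smallest N percent of the sorted sample, walks from the largest
values instead when the series contains negative values, and linearly interpolates one extra
slot whenever N percent of the points is not a whole number. A simplified standalone sketch
without the interpolation and sign handling (illustrative only):

```
#include <stdio.h>
#include <stdlib.h>

static int cmp(const void *a, const void *b) {
    double x = *(const double *)a, y = *(const double *)b;
    return (x > y) - (x < y);
}

int main(void) {
    double series[] = { 9, 1, 7, 3, 5, 8, 2, 6, 4, 10 };
    size_t n = sizeof(series) / sizeof(series[0]);
    double percent = 0.90;                        /* percentile90 */

    qsort(series, n, sizeof(double), cmp);

    size_t slots = (size_t)((double)n * percent); /* 9 of 10 points */
    if (!slots) slots = 1;

    double sum = 0.0;
    for (size_t i = 0; i < slots; i++) sum += series[i];

    /* average of the 9 smallest values (1..9) = 5 */
    printf("percentile90 ~= %.3f\n", sum / (double)slots);
    return 0;
}
```
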
diff --git a/web/api/queries/percentile/percentile.h b/web/api/queries/percentile/percentile.h
new file mode 100644
index 000000000..709717ebd
--- /dev/null
+++ b/web/api/queries/percentile/percentile.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_PERCENTILE_H
+#define NETDATA_API_QUERIES_PERCENTILE_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_create_percentile25(RRDR *r, const char *options);
+extern void grouping_create_percentile50(RRDR *r, const char *options);
+extern void grouping_create_percentile75(RRDR *r, const char *options);
+extern void grouping_create_percentile80(RRDR *r, const char *options);
+extern void grouping_create_percentile90(RRDR *r, const char *options);
+extern void grouping_create_percentile95(RRDR *r, const char *options);
+extern void grouping_create_percentile97(RRDR *r, const char *options);
+extern void grouping_create_percentile98(RRDR *r, const char *options);
+extern void grouping_create_percentile99(RRDR *r, const char *options);
+extern void grouping_reset_percentile(RRDR *r);
+extern void grouping_free_percentile(RRDR *r);
+extern void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_PERCENTILE_H
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
index 5c6c70411..d776f6d11 100644
--- a/web/api/queries/query.c
+++ b/web/api/queries/query.c
@@ -3,9 +3,9 @@
#include "query.h"
#include "web/api/formatters/rrd2json.h"
#include "rrdr.h"
-#include "database/ram/rrddim_mem.h"
#include "average/average.h"
+#include "countif/countif.h"
#include "incremental_sum/incremental_sum.h"
#include "max/max.h"
#include "median/median.h"
@@ -14,6 +14,8 @@
#include "stddev/stddev.h"
#include "ses/ses.h"
#include "des/des.h"
+#include "percentile/percentile.h"
+#include "trimmed_mean/trimmed_mean.h"
// ----------------------------------------------------------------------------
@@ -28,7 +30,7 @@ static struct {
// Allocate all required structures for a query.
// This is called once for each netdata query.
- void (*create)(struct rrdresult *r);
+ void (*create)(struct rrdresult *r, const char *options);
// Cleanup collected values, but don't destroy the structures.
// This is called when the query engine switches dimensions,
@@ -40,7 +42,7 @@ static struct {
// Add a single value into the calculation.
// The module may decide to cache it, or use it in the fly.
- void (*add)(struct rrdresult *r, calculated_number value);
+ void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);
// Generate a single result for the values added so far.
// More values and points may be requested later.
@@ -48,7 +50,9 @@ static struct {
// when flushing it (so for a few modules it may be better to
// continue after a flush as if nothing changed, for others a
// cleanup of the internal structures may be required).
- calculated_number (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+ TIER_QUERY_FETCH tier_query_fetch;
} api_v1_data_groups[] = {
{.name = "average",
.hash = 0,
@@ -58,7 +62,8 @@ static struct {
.reset = grouping_reset_average,
.free = grouping_free_average,
.add = grouping_add_average,
- .flush = grouping_flush_average
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "mean", // alias on 'average'
.hash = 0,
@@ -68,7 +73,107 @@ static struct {
.reset = grouping_reset_average,
.free = grouping_free_average,
.add = grouping_add_average,
- .flush = grouping_flush_average
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean1",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN1,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean1,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean2",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN2,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean2,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean3",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN3,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean3,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean5",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean5,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean10",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN10,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean10,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean15",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN15,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean15,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean20",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN20,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean20,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean25",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN25,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean25,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean5,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "incremental_sum",
.hash = 0,
@@ -78,7 +183,8 @@ static struct {
.reset = grouping_reset_incremental_sum,
.free = grouping_free_incremental_sum,
.add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum
+ .flush = grouping_flush_incremental_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "incremental-sum",
.hash = 0,
@@ -88,7 +194,8 @@ static struct {
.reset = grouping_reset_incremental_sum,
.free = grouping_free_incremental_sum,
.add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum
+ .flush = grouping_flush_incremental_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "median",
.hash = 0,
@@ -98,7 +205,217 @@ static struct {
.reset = grouping_reset_median,
.free = grouping_free_median,
.add = grouping_add_median,
- .flush = grouping_flush_median
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median1",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN1,
+ .init = NULL,
+ .create= grouping_create_trimmed_median1,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median2",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN2,
+ .init = NULL,
+ .create= grouping_create_trimmed_median2,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median3",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN3,
+ .init = NULL,
+ .create= grouping_create_trimmed_median3,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median5",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_median5,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median10",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN10,
+ .init = NULL,
+ .create= grouping_create_trimmed_median10,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median15",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN15,
+ .init = NULL,
+ .create= grouping_create_trimmed_median15,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median20",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN20,
+ .init = NULL,
+ .create= grouping_create_trimmed_median20,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median25",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN25,
+ .init = NULL,
+ .create= grouping_create_trimmed_median25,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_median5,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile25",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE25,
+ .init = NULL,
+ .create= grouping_create_percentile25,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile50",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE50,
+ .init = NULL,
+ .create= grouping_create_percentile50,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile75",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE75,
+ .init = NULL,
+ .create= grouping_create_percentile75,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile80",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE80,
+ .init = NULL,
+ .create= grouping_create_percentile80,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile90",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE90,
+ .init = NULL,
+ .create= grouping_create_percentile90,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile95",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE95,
+ .init = NULL,
+ .create= grouping_create_percentile95,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile97",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE97,
+ .init = NULL,
+ .create= grouping_create_percentile97,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile98",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE98,
+ .init = NULL,
+ .create= grouping_create_percentile98,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile99",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE99,
+ .init = NULL,
+ .create= grouping_create_percentile99,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE95,
+ .init = NULL,
+ .create= grouping_create_percentile95,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "min",
.hash = 0,
@@ -108,7 +425,8 @@ static struct {
.reset = grouping_reset_min,
.free = grouping_free_min,
.add = grouping_add_min,
- .flush = grouping_flush_min
+ .flush = grouping_flush_min,
+ .tier_query_fetch = TIER_QUERY_FETCH_MIN
},
{.name = "max",
.hash = 0,
@@ -118,7 +436,8 @@ static struct {
.reset = grouping_reset_max,
.free = grouping_free_max,
.add = grouping_add_max,
- .flush = grouping_flush_max
+ .flush = grouping_flush_max,
+ .tier_query_fetch = TIER_QUERY_FETCH_MAX
},
{.name = "sum",
.hash = 0,
@@ -128,7 +447,8 @@ static struct {
.reset = grouping_reset_sum,
.free = grouping_free_sum,
.add = grouping_add_sum,
- .flush = grouping_flush_sum
+ .flush = grouping_flush_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_SUM
},
// standard deviation
@@ -140,7 +460,8 @@ static struct {
.reset = grouping_reset_stddev,
.free = grouping_free_stddev,
.add = grouping_add_stddev,
- .flush = grouping_flush_stddev
+ .flush = grouping_flush_stddev,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "cv", // coefficient variation is calculated by stddev
.hash = 0,
@@ -150,7 +471,8 @@ static struct {
.reset = grouping_reset_stddev, // not an error, stddev calculates this too
.free = grouping_free_stddev, // not an error, stddev calculates this too
.add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation
+ .flush = grouping_flush_coefficient_of_variation,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "rsd", // alias of 'cv'
.hash = 0,
@@ -160,7 +482,8 @@ static struct {
.reset = grouping_reset_stddev, // not an error, stddev calculates this too
.free = grouping_free_stddev, // not an error, stddev calculates this too
.add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation
+ .flush = grouping_flush_coefficient_of_variation,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
/*
@@ -172,7 +495,8 @@ static struct {
.reset = grouping_reset_stddev,
.free = grouping_free_stddev,
.add = grouping_add_stddev,
- .flush = grouping_flush_mean
+ .flush = grouping_flush_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
*/
@@ -185,7 +509,8 @@ static struct {
.reset = grouping_reset_stddev,
.free = grouping_free_stddev,
.add = grouping_add_stddev,
- .flush = grouping_flush_variance
+ .flush = grouping_flush_variance,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
*/
@@ -193,44 +518,60 @@ static struct {
{.name = "ses",
.hash = 0,
.value = RRDR_GROUPING_SES,
- .init = grouping_init_ses,
+ .init = grouping_init_ses,
.create= grouping_create_ses,
.reset = grouping_reset_ses,
.free = grouping_free_ses,
.add = grouping_add_ses,
- .flush = grouping_flush_ses
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "ema", // alias for 'ses'
.hash = 0,
.value = RRDR_GROUPING_SES,
- .init = NULL,
+ .init = NULL,
.create= grouping_create_ses,
.reset = grouping_reset_ses,
.free = grouping_free_ses,
.add = grouping_add_ses,
- .flush = grouping_flush_ses
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "ewma", // alias for ses
.hash = 0,
.value = RRDR_GROUPING_SES,
- .init = NULL,
+ .init = NULL,
.create= grouping_create_ses,
.reset = grouping_reset_ses,
.free = grouping_free_ses,
.add = grouping_add_ses,
- .flush = grouping_flush_ses
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
// double exponential smoothing
{.name = "des",
.hash = 0,
.value = RRDR_GROUPING_DES,
- .init = grouping_init_des,
+ .init = grouping_init_des,
.create= grouping_create_des,
.reset = grouping_reset_des,
.free = grouping_free_des,
.add = grouping_add_des,
- .flush = grouping_flush_des
+ .flush = grouping_flush_des,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+
+ {.name = "countif",
+ .hash = 0,
+ .value = RRDR_GROUPING_COUNTIF,
+ .init = NULL,
+ .create= grouping_create_countif,
+ .reset = grouping_reset_countif,
+ .free = grouping_free_countif,
+ .add = grouping_add_countif,
+ .flush = grouping_flush_countif,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
// terminator
@@ -242,7 +583,8 @@ static struct {
.reset = grouping_reset_average,
.free = grouping_free_average,
.add = grouping_add_average,
- .flush = grouping_flush_average
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
}
};
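
Each entry in the table above binds one `group=` name to the five-hook lifecycle described in
the struct comments: `create()` once per query, `add()` for every database point, `flush()` for
every output point, `reset()` when the query switches dimensions, and `free()` at the end. A toy
illustration of that flow with a plain average (illustrative only, not the netdata API):

```
#include <stdio.h>
#include <stdlib.h>

struct avg { double sum; size_t count; };

static void *avg_create(void) { return calloc(1, sizeof(struct avg)); }

static void avg_add(void *g, double v) {
    struct avg *a = g;
    a->sum += v;
    a->count++;
}

static double avg_flush(void *g) {
    struct avg *a = g;
    double v = a->count ? a->sum / (double)a->count : 0.0;
    a->sum = 0.0;   /* ready for the next group of points */
    a->count = 0;
    return v;
}

int main(void) {
    void *g = avg_create();
    if (!g) return 1;

    double collected[] = { 1, 2, 3, 4, 5, 6 };

    /* group every 3 collected points into one output point */
    for (size_t i = 0; i < 6; i++) {
        avg_add(g, collected[i]);
        if ((i + 1) % 3 == 0)
            printf("output point: %.2f\n", avg_flush(g));
    }

    free(g);
    return 0;
}
```
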
@@ -280,6 +622,41 @@ RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPI
return def;
}
+const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group) {
+ int i;
+
+ for(i = 0; api_v1_data_groups[i].name ; i++)
+ if(unlikely(group == api_v1_data_groups[i].value))
+ return api_v1_data_groups[i].name;
+
+ return "unknown";
+}
+
+static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
+ int i, found = 0;
+ for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
+ if(api_v1_data_groups[i].value == group_method) {
+ r->internal.grouping_create = api_v1_data_groups[i].create;
+ r->internal.grouping_reset = api_v1_data_groups[i].reset;
+ r->internal.grouping_free = api_v1_data_groups[i].free;
+ r->internal.grouping_add = api_v1_data_groups[i].add;
+ r->internal.grouping_flush = api_v1_data_groups[i].flush;
+ r->internal.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
+ found = 1;
+ }
+ }
+ if(!found) {
+ errno = 0;
+ internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
+ r->internal.grouping_create = grouping_create_average;
+ r->internal.grouping_reset = grouping_reset_average;
+ r->internal.grouping_free = grouping_free_average;
+ r->internal.grouping_add = grouping_add_average;
+ r->internal.grouping_flush = grouping_flush_average;
+ r->internal.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
+ }
+}
+
// ----------------------------------------------------------------------------
static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims,
@@ -335,7 +712,7 @@ static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options,
// check if all dimensions are hidden
if(unlikely(!dims_not_hidden_not_zero && dims_selected)) {
- // there are a few selected dimensions
+ // there are a few selected dimensions,
// but they are all zero
// enable the selected ones
// to avoid returning an empty chart
@@ -352,22 +729,20 @@ static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long
return &r->o[ rrdr_line * r->d ];
}
-static inline calculated_number *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
+static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
return &r->v[ rrdr_line * r->d ];
}
static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
rrdr_line++;
- #ifdef NETDATA_INTERNAL_CHECKS
-
- if(unlikely(rrdr_line >= r->n))
- error("INTERNAL ERROR: requested to step above RRDR size for chart '%s'", r->st->name);
+ internal_error(rrdr_line >= r->n,
+ "QUERY: requested to step above RRDR size for chart '%s'",
+ r->st->name);
- if(unlikely(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t))
- error("INTERNAL ERROR: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'", (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);
-
- #endif
+ internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
+ "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'",
+ (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);
// save the time
r->t[rrdr_line] = t;
@@ -381,337 +756,737 @@ static inline void rrdr_done(RRDR *r, long rrdr_line) {
// ----------------------------------------------------------------------------
-// fill RRDR for a single dimension
+// tier management
-static inline void do_dimension_variablestep(
- RRDR *r
- , long points_wanted
- , RRDDIM *rd
- , long dim_id_in_rrdr
- , time_t after_wanted
- , time_t before_wanted
- , uint32_t options
-){
-// RRDSET *st = r->st;
+static int rrddim_find_best_tier_for_timeframe(RRDDIM *rd, time_t after_wanted, time_t before_wanted, long points_wanted) {
+ if(unlikely(storage_tiers < 2))
+ return 0;
+
+ if(unlikely(after_wanted == before_wanted || points_wanted <= 0 || !rd || !rd->rrdset)) {
- time_t
- now = after_wanted,
- dt = r->update_every,
- max_date = 0,
- min_date = 0;
+ if(!rd)
+ internal_error(true, "QUERY: NULL dimension - invalid params to tier calculation");
+ else
+ internal_error(true, "QUERY: chart '%s' dimension '%s' invalid params to tier calculation",
+ (rd->rrdset)?rd->rrdset->name:"unknown", rd->name);
- long
-// group_size = r->group,
- points_added = 0,
- values_in_group = 0,
- values_in_group_non_zero = 0,
- rrdr_line = -1;
+ return 0;
+ }
- RRDR_VALUE_FLAGS
- group_value_flags = RRDR_VALUE_NOTHING;
+ //BUFFER *wb = buffer_create(1000);
+ //buffer_sprintf(wb, "Best tier for chart '%s', dim '%s', from %ld to %ld (dur %ld, every %d), points %ld",
+ // rd->rrdset->name, rd->name, after_wanted, before_wanted, before_wanted - after_wanted, rd->update_every, points_wanted);
- struct rrddim_query_handle handle;
+ long weight[storage_tiers];
+
+ for(int tier = 0; tier < storage_tiers ; tier++) {
+ if(unlikely(!rd->tiers[tier])) {
+ internal_error(true, "QUERY: tier %d of chart '%s' dimension '%s' not initialized",
+ tier, rd->rrdset->name, rd->name);
+ // buffer_free(wb);
+ return 0;
+ }
+
+ time_t first_t = rd->tiers[tier]->query_ops.oldest_time(rd->tiers[tier]->db_metric_handle);
+ time_t last_t = rd->tiers[tier]->query_ops.latest_time(rd->tiers[tier]->db_metric_handle);
+
+ time_t common_after = MAX(first_t, after_wanted);
+ time_t common_before = MIN(last_t, before_wanted);
+
+ long time_coverage = (common_before - common_after) * 1000 / (before_wanted - after_wanted);
+ if(time_coverage < 0) time_coverage = 0;
+
+ int update_every = (int)rd->tiers[tier]->tier_grouping * (int)rd->update_every;
+ if(unlikely(update_every == 0)) {
+ internal_error(true, "QUERY: update_every of tier %d for chart '%s' dimension '%s' is zero. tg = %d, ue = %d",
+ tier, rd->rrdset->name, rd->name, rd->tiers[tier]->tier_grouping, rd->update_every);
+ // buffer_free(wb);
+ return 0;
+ }
- calculated_number min = r->min, max = r->max;
- size_t db_points_read = 0;
- time_t db_now = now;
- storage_number n_curr, n_prev = SN_EMPTY_SLOT;
- calculated_number value;
+ long points_available = (before_wanted - after_wanted) / update_every;
+ long points_delta = points_available - points_wanted;
+ long points_coverage = (points_delta < 0) ? points_available * 1000 / points_wanted: 1000;
- for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
- // make sure we return data in the proper time range
- if (unlikely(now > before_wanted)) {
+ if(points_available <= 0)
+ weight[tier] = -LONG_MAX;
+ else
+ weight[tier] = points_coverage;
+
+ // buffer_sprintf(wb, ": tier %d, first %ld, last %ld (dur %ld, tg %d, every %d), points %ld, tcoverage %ld, pcoverage %ld, weight %ld",
+ // tier, first_t, last_t, last_t - first_t, rd->tiers[tier]->tier_grouping, update_every,
+ // points_available, time_coverage, points_coverage, weight[tier]);
+ }
+
+ int best_tier = 0;
+ for(int tier = 1; tier < storage_tiers ; tier++) {
+ if(weight[tier] >= weight[best_tier])
+ best_tier = tier;
+ }
+
+ if(weight[best_tier] == -LONG_MAX)
+ best_tier = 0;
+
+ //buffer_sprintf(wb, ": final best tier %d", best_tier);
+ //internal_error(true, "%s", buffer_tostring(wb));
+ //buffer_free(wb);
+
+ return best_tier;
+}
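+
+// A worked example of the weighting above (illustrative numbers): a query for
+// 600 points over a 600 second window on a tier with update_every 1s covering
+// the whole window gets points_available = 600, points_delta = 0 and therefore
+// points_coverage = 1000; a tier with tier_grouping 60 gets
+// points_available = 10 and points_coverage = 10 * 1000 / 600 = 16, so the
+// detailed tier is selected as best_tier.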
+
+static int rrdset_find_natural_update_every_for_timeframe(RRDSET *st, time_t after_wanted, time_t before_wanted, long points_wanted, RRDR_OPTIONS options, int tier) {
+ int ret = st->update_every;
+
+ if(unlikely(!st->dimensions))
+ return ret;
+
+ rrdset_rdlock(st);
+ int best_tier;
+
+ if(options & RRDR_OPTION_SELECTED_TIER && tier >= 0 && tier < storage_tiers)
+ best_tier = tier;
+ else
+ best_tier = rrddim_find_best_tier_for_timeframe(st->dimensions, after_wanted, before_wanted, points_wanted);
+
+ if(!st->dimensions->tiers[best_tier]) {
+ internal_error(
+ true,
+ "QUERY: tier %d on chart '%s', is not initialized", best_tier, st->name);
+ }
+ else {
+ ret = (int)st->dimensions->tiers[best_tier]->tier_grouping * (int)st->update_every;
+ if(unlikely(!ret)) {
+ internal_error(
+ true,
+ "QUERY: update_every calculated to be zero on chart '%s', tier_grouping %d, update_every %d",
+ st->name, st->dimensions->tiers[best_tier]->tier_grouping, st->update_every);
+
+ ret = st->update_every;
+ }
+ }
+
+ rrdset_unlock(st);
+
+ return ret;
+}
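+
+// Illustrative example: if the best tier has tier_grouping 60 on a chart
+// collected every 1 second, the natural update_every for this timeframe
+// becomes 60 seconds.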
+
+// ----------------------------------------------------------------------------
+// query ops
+
+typedef struct query_point {
+ time_t end_time;
+ time_t start_time;
+ NETDATA_DOUBLE value;
+ NETDATA_DOUBLE anomaly;
+ SN_FLAGS flags;
#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
+ size_t id;
#endif
- break;
- }
- if (unlikely(now < after_wanted)) {
+} QUERY_POINT;
+
+QUERY_POINT QUERY_POINT_EMPTY = {
+ .end_time = 0,
+ .start_time = 0,
+ .value = NAN,
+ .anomaly = 0,
+ .flags = SN_FLAG_NONE,
#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
+ .id = 0,
#endif
- continue;
- }
+};
- while (now >= db_now && (!rd->state->query_ops.is_finished(&handle) ||
- does_storage_number_exist(n_prev))) {
- value = NAN;
- if (does_storage_number_exist(n_prev)) {
- // use the previously read database value
- n_curr = n_prev;
- } else {
- // read the value from the database
- n_curr = rd->state->query_ops.next_metric(&handle, &db_now);
- }
- n_prev = SN_EMPTY_SLOT;
- // db_now has a different value than above
- if (likely(now >= db_now)) {
- if (likely(does_storage_number_exist(n_curr))) {
- if (options & RRDR_OPTION_ANOMALY_BIT)
- value = (n_curr & SN_ANOMALY_BIT) ? 0.0 : 100.0;
- else
- value = unpack_storage_number(n_curr);
-
- if (likely(value != 0.0))
- values_in_group_non_zero++;
-
- if (unlikely(did_storage_number_reset(n_curr)))
- group_value_flags |= RRDR_VALUE_RESET;
- }
- } else {
- // We must postpone processing the value and fill the result with gaps instead
- if (likely(does_storage_number_exist(n_curr))) {
- n_prev = n_curr;
- }
- }
- // add this value to grouping
- if(likely(!isnan(value)))
- r->internal.grouping_add(r, value);
+#ifdef NETDATA_INTERNAL_CHECKS
+#define query_point_set_id(point, point_id) (point).id = point_id
+#else
+#define query_point_set_id(point, point_id) debug_dummy()
+#endif
- values_in_group++;
- db_points_read++;
- }
+typedef struct query_plan_entry {
+ size_t tier;
+ time_t after;
+ time_t before;
+} QUERY_PLAN_ENTRY;
- if (0 == values_in_group) {
- // add NAN to grouping
- r->internal.grouping_add(r, NAN);
- }
+typedef struct query_plan {
+ size_t entries;
+ QUERY_PLAN_ENTRY data[RRD_STORAGE_TIERS*2];
+} QUERY_PLAN;
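+// A plan holds at most one entry for the selected tier, plus one entry per
+// higher tier to extend the start of the query and one per lower tier to
+// extend its end, so RRD_STORAGE_TIERS entries are enough and
+// RRD_STORAGE_TIERS*2 is a comfortable upper bound.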
+
+typedef struct query_engine_ops {
+ // configuration
+ RRDR *r;
+ RRDDIM *rd;
+ time_t view_update_every;
+ time_t query_granularity;
+ TIER_QUERY_FETCH tier_query_fetch;
+
+    // query planner
+ QUERY_PLAN plan;
+ size_t current_plan;
+ time_t current_plan_expire_time;
+
+ // storage queries
+ size_t tier;
+ struct rrddim_tier *tier_ptr;
+ struct rrddim_query_handle handle;
+ STORAGE_POINT (*next_metric)(struct rrddim_query_handle *handle);
+ int (*is_finished)(struct rrddim_query_handle *handle);
+ void (*finalize)(struct rrddim_query_handle *handle);
- rrdr_line = rrdr_line_init(r, now, rrdr_line);
+ // aggregating points over time
+ void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
+ NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ size_t group_points_non_zero;
+ size_t group_points_added;
+ NETDATA_DOUBLE group_anomaly_rate;
+ RRDR_VALUE_FLAGS group_value_flags;
- if(unlikely(!min_date)) min_date = now;
- max_date = now;
+ // statistics
+ size_t db_total_points_read;
+ size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
+} QUERY_ENGINE_OPS;
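+// Lifecycle (as used below): rrd2rrdr_do_dimension() fills a QUERY_ENGINE_OPS,
+// query_plan() builds the plan and activates its first entry, and the main
+// query loop switches to the next entry whenever now_end_time crosses
+// current_plan_expire_time.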
- // find the place to store our values
- RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
- // update the dimension options
- if(likely(values_in_group_non_zero))
- r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+// ----------------------------------------------------------------------------
+// query planner
- // store the specific point options
- *rrdr_value_options_ptr = group_value_flags;
+#define query_plan_should_switch_plan(ops, now) ((now) >= (ops).current_plan_expire_time)
- // store the value
- value = r->internal.grouping_flush(r, rrdr_value_options_ptr);
- r->v[rrdr_line * r->d + dim_id_in_rrdr] = value;
+static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, time_t overwrite_after) {
+ if(unlikely(plan_id >= ops->plan.entries))
+ plan_id = ops->plan.entries - 1;
- if(likely(points_added || dim_id_in_rrdr)) {
- // find the min/max across all dimensions
+ time_t after = ops->plan.data[plan_id].after;
+ time_t before = ops->plan.data[plan_id].before;
- if(unlikely(value < min)) min = value;
- if(unlikely(value > max)) max = value;
+ if(overwrite_after > after && overwrite_after < before)
+ after = overwrite_after;
- }
- else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
- // so, on the first point added for the query.
- min = max = value;
+ ops->tier = ops->plan.data[plan_id].tier;
+ ops->tier_ptr = ops->rd->tiers[ops->tier];
+ ops->tier_ptr->query_ops.init(ops->tier_ptr->db_metric_handle, &ops->handle, after, before, ops->r->internal.tier_query_fetch);
+ ops->next_metric = ops->tier_ptr->query_ops.next_metric;
+ ops->is_finished = ops->tier_ptr->query_ops.is_finished;
+ ops->finalize = ops->tier_ptr->query_ops.finalize;
+ ops->current_plan = plan_id;
+ ops->current_plan_expire_time = ops->plan.data[plan_id].before;
+}
+
+static void query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
+ internal_error(now < ops->current_plan_expire_time && now < ops->plan.data[ops->current_plan].before,
+ "QUERY: switching query plan too early!");
+
+ time_t next_plan_before_time;
+ do {
+ ops->current_plan++;
+
+ if (ops->current_plan >= ops->plan.entries) {
+ ops->current_plan = ops->plan.entries - 1;
+ return;
}
- points_added++;
- values_in_group = 0;
- group_value_flags = RRDR_VALUE_NOTHING;
- values_in_group_non_zero = 0;
+ next_plan_before_time = ops->plan.data[ops->current_plan].before;
+ } while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);
+
+ if(ops->finalize) {
+ ops->finalize(&ops->handle);
+ ops->finalize = NULL;
}
- rd->state->query_ops.finalize(&handle);
- r->internal.db_points_read += db_points_read;
- r->internal.result_points_generated += points_added;
+ query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));
- r->min = min;
- r->max = max;
- r->before = max_date;
- r->after = min_date - (r->group - 1) * dt;
- rrdr_done(r, rrdr_line);
+ // internal_error(true, "QUERY: switched plan to %zu (all is %zu), previous expiration was %ld, this starts at %ld, now is %ld, last_point_end_time %ld", ops->current_plan, ops->plan.entries, ops->plan.data[ops->current_plan-1].before, ops->plan.data[ops->current_plan].after, now, last_point_end_time);
+}
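+// Note: when the plan list is exhausted, the query keeps using the last plan
+// instead of switching.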
- #ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(r->rows != points_added))
- error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
- #endif
+static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
+ QUERY_PLAN_ENTRY *p1 = (QUERY_PLAN_ENTRY *)a;
+ QUERY_PLAN_ENTRY *p2 = (QUERY_PLAN_ENTRY *)b;
+ return (p1->after < p2->after)?-1:1;
}
-static inline void do_dimension_fixedstep(
- RRDR *r
- , long points_wanted
- , RRDDIM *rd
- , long dim_id_in_rrdr
- , time_t after_wanted
- , time_t before_wanted
- , uint32_t options
-){
- time_t now = after_wanted,
- dt = r->update_every / r->group, /* usually is st->update_every */
- max_date = 0,
- min_date = 0;
+static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, long points_wanted) {
+ RRDDIM *rd = ops->rd;
- long group_size = r->group,
- points_added = 0,
- values_in_group = 0,
- values_in_group_non_zero = 0,
- rrdr_line = -1;
+ //BUFFER *wb = buffer_create(1000);
+ //buffer_sprintf(wb, "QUERY PLAN for chart '%s' dimension '%s', from %ld to %ld:", rd->rrdset->name, rd->name, after_wanted, before_wanted);
- RRDR_VALUE_FLAGS group_value_flags = RRDR_VALUE_NOTHING;
+ // put our selected tier as the first plan
+ size_t selected_tier;
- struct rrddim_query_handle handle;
+ if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER && ops->r->internal.query_tier >= 0 && ops->r->internal.query_tier < storage_tiers) {
+ selected_tier = ops->r->internal.query_tier;
+ }
+ else {
- calculated_number min = r->min, max = r->max;
- size_t db_points_read = 0;
- time_t db_now = now;
- time_t first_time_t = rrddim_first_entry_t(rd);
+ selected_tier = rrddim_find_best_tier_for_timeframe(rd, after_wanted, before_wanted, points_wanted);
- // cache the function pointers we need in the loop
- storage_number (*next_metric)(struct rrddim_query_handle *handle, time_t *current_time) = rd->state->query_ops.next_metric;
- void (*grouping_add)(struct rrdresult *r, calculated_number value) = r->internal.grouping_add;
- calculated_number (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) = r->internal.grouping_flush;
- RRD_MEMORY_MODE rrd_memory_mode = rd->rrd_memory_mode;
+ if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
+ ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
+ }
- for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
- // make sure we return data in the proper time range
- if(unlikely(now > before_wanted)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
-#endif
- break;
+ ops->plan.entries = 1;
+ ops->plan.data[0].tier = selected_tier;
+ ops->plan.data[0].after = rd->tiers[selected_tier]->query_ops.oldest_time(rd->tiers[selected_tier]->db_metric_handle);
+ ops->plan.data[0].before = rd->tiers[selected_tier]->query_ops.latest_time(rd->tiers[selected_tier]->db_metric_handle);
+
+ if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
+ // the selected tier
+ time_t selected_tier_first_time_t = ops->plan.data[0].after;
+ time_t selected_tier_last_time_t = ops->plan.data[0].before;
+
+ //buffer_sprintf(wb, ": SELECTED tier %zu, from %ld to %ld", selected_tier, ops->plan.data[0].after, ops->plan.data[0].before);
+
+ // check if our selected tier can start the query
+ if (selected_tier_first_time_t > after_wanted) {
+ // we need some help from other tiers
+ for (int tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
+ // find the first time of this tier
+ time_t first_time_t = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
+
+ //buffer_sprintf(wb, ": EVAL AFTER tier %d, %ld", tier, first_time_t);
+
+ // can it help?
+ if (first_time_t < selected_tier_first_time_t) {
+ // it can help us add detail at the beginning of the query
+ QUERY_PLAN_ENTRY t = {
+ .tier = tr,
+ .after = (first_time_t < after_wanted) ? after_wanted : first_time_t,
+ .before = selected_tier_first_time_t};
+ ops->plan.data[ops->plan.entries++] = t;
+
+ // prepare for the tier
+ selected_tier_first_time_t = t.after;
+
+ if (t.after <= after_wanted)
+ break;
+ }
+ }
}
- if(unlikely(now < after_wanted)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
-#endif
- continue;
+ // check if our selected tier can finish the query
+ if (selected_tier_last_time_t < before_wanted) {
+ // we need some help from other tiers
+ for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
+ // find the last time of this tier
+ time_t last_time_t = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
+
+ //buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_t);
+
+ // can it help?
+ if (last_time_t > selected_tier_last_time_t) {
+ // it can help us add detail at the end of the query
+ QUERY_PLAN_ENTRY t = {
+ .tier = tr,
+ .after = selected_tier_last_time_t,
+ .before = (last_time_t > before_wanted) ? before_wanted : last_time_t};
+ ops->plan.data[ops->plan.entries++] = t;
+
+ // prepare for the tier
+ selected_tier_last_time_t = t.before;
+
+ if (t.before >= before_wanted)
+ break;
+ }
+ }
}
+ }
- // read the value from the database
- //storage_number n = rd->values[slot];
+ // sort the query plan
+ if(ops->plan.entries > 1)
+ qsort(&ops->plan.data, ops->plan.entries, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);
-#ifdef NETDATA_INTERNAL_CHECKS
- struct mem_query_handle* mem_handle = (struct mem_query_handle*)handle.handle;
- if ((rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) &&
- (rrdset_time2slot(r->st, now) != (long unsigned)(mem_handle->slot))) {
- error("INTERNAL CHECK: Unaligned query for %s, database slot: %lu, expected slot: %lu", rd->id, (long unsigned)mem_handle->slot, rrdset_time2slot(r->st, now));
- }
-#endif
+ // make sure it has the whole timeframe we need
+ ops->plan.data[0].after = after_wanted;
+ ops->plan.data[ops->plan.entries - 1].before = before_wanted;
- db_now = now; // this is needed to set db_now in case the next_metric implementation does not set it
+ //buffer_sprintf(wb, ": FINAL STEPS %zu", ops->plan.entries);
- storage_number n;
- calculated_number value;
+ //for(size_t i = 0; i < ops->plan.entries ;i++)
+ // buffer_sprintf(wb, ": STEP %zu = use tier %zu from %ld to %ld", i+1, ops->plan.data[i].tier, ops->plan.data[i].after, ops->plan.data[i].before);
- if (unlikely(rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE && now <= first_time_t)) {
- n = SN_EMPTY_SLOT;
- value = NAN;
- }
- else {
- // load the metric value
- n = next_metric(&handle, &db_now);
- db_points_read++;
-
- // and unpack it
- if(likely(does_storage_number_exist(n))) {
- if (options & RRDR_OPTION_ANOMALY_BIT)
- value = (n & SN_ANOMALY_BIT) ? 0.0 : 100.0;
- else
- value = unpack_storage_number(n);
- }
- else
- value = NAN;
- }
+ //internal_error(true, "%s", buffer_tostring(wb));
- if(unlikely(db_now > before_wanted)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
-#endif
- break;
- }
+ query_planer_activate_plan(ops, 0, 0);
+}
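+// Example (illustrative): with 3 tiers and tier 1 selected, covering
+// [2000, 9000], a query for [1000, 10000] produces, after sorting:
+//   step 1: tier 2 from 1000 to  2000 (older data from a coarser tier)
+//   step 2: tier 1 from 2000 to  9000 (the selected tier)
+//   step 3: tier 0 from 9000 to 10000 (newer data from a finer tier)
+// the first step's 'after' and the last step's 'before' are then forced to
+// the edges of the requested window.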
- // this loop exists only to fill nulls
- // so, if there is a value already, we use it for the first iteration
- // but the following iterations will just fill nulls to the destination
- for ( ; now <= db_now ; now += dt, value = NAN, n = SN_EMPTY_SLOT) {
- if(likely(does_storage_number_exist(n))) {
-
-#if defined(NETDATA_INTERNAL_CHECKS) && defined(ENABLE_DBENGINE)
- if(now >= db_now) {
- struct rrdeng_query_handle *rrd_handle = (struct rrdeng_query_handle *)handle.handle;
- if ((rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) && (now != rrd_handle->now))
- error(
- "INTERNAL CHECK: Unaligned query for %s, database time: %ld, expected time: %ld",
- rd->id,
- (long)rrd_handle->now,
- (long)now);
- }
-#endif
- if(likely(value != 0.0))
- values_in_group_non_zero++;
+// ----------------------------------------------------------------------------
+// dimension level query engine
+
+#define query_interpolate_point(this_point, last_point, now) do { \
+ if(likely( \
+ /* the point to interpolate is more than 1s wide */ \
+ (this_point).end_time - (this_point).start_time > 1 \
+ \
+ /* the two points are exactly next to each other */ \
+ && (last_point).end_time == (this_point).start_time \
+ \
+ /* both points are valid numbers */ \
+ && netdata_double_isnumber((this_point).value) \
+ && netdata_double_isnumber((last_point).value) \
+ \
+ )) { \
+ (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).end_time - (now)) / (NETDATA_DOUBLE)((this_point).end_time - (this_point).start_time)); \
+ (this_point).end_time = now; \
+ } \
+} while(0)
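+// Example (illustrative): last_point ends at t=100 with value 10 and the new
+// point spans t=100..110 with value 20; interpolating at now=105 gives
+// 10 + (20 - 10) * (1 - (110 - 105) / (110 - 100)) = 15, and the point's
+// end_time becomes 105.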
+
+#define query_add_point_to_group(r, point, ops) do { \
+ if(likely(netdata_double_isnumber((point).value))) { \
+ if(likely(fpclassify((point).value) != FP_ZERO)) \
+ (ops).group_points_non_zero++; \
+ \
+ if(unlikely((point).flags & SN_FLAG_RESET)) \
+ (ops).group_value_flags |= RRDR_VALUE_RESET; \
+ \
+ (ops).grouping_add(r, (point).value); \
+ } \
+ \
+ (ops).group_points_added++; \
+ (ops).group_anomaly_rate += (point).anomaly; \
+} while(0)
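+// Note: an empty point (NAN value, zero anomaly) still increments
+// group_points_added without feeding the grouping function, so gaps dilute
+// the group's anomaly rate but contribute no value.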
+
+static inline void rrd2rrdr_do_dimension(
+ RRDR *r
+ , long points_wanted
+ , RRDDIM *rd
+ , long dim_id_in_rrdr
+ , time_t after_wanted
+ , time_t before_wanted
+){
+ time_t max_date = 0,
+ min_date = 0;
+
+ size_t points_added = 0;
+
+ QUERY_ENGINE_OPS ops = {
+ .r = r,
+ .rd = rd,
+ .grouping_add = r->internal.grouping_add,
+ .grouping_flush = r->internal.grouping_flush,
+ .tier_query_fetch = r->internal.tier_query_fetch,
+ .view_update_every = r->update_every,
+ .query_granularity = r->update_every / r->group,
+ .group_value_flags = RRDR_VALUE_NOTHING
+ };
+
+ long rrdr_line = -1;
+ bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
+
+ query_plan(&ops, after_wanted, before_wanted, points_wanted);
+
+ NETDATA_DOUBLE min = r->min, max = r->max;
+
+ QUERY_POINT last2_point = QUERY_POINT_EMPTY;
+ QUERY_POINT last1_point = QUERY_POINT_EMPTY;
+ QUERY_POINT new_point = QUERY_POINT_EMPTY;
+
+ time_t now_start_time = after_wanted - ops.query_granularity;
+ time_t now_end_time = after_wanted + ops.view_update_every - ops.query_granularity;
- if(unlikely(did_storage_number_reset(n)))
- group_value_flags |= RRDR_VALUE_RESET;
+    // the main loop, stepping the view forward by view_update_every at a time
+ for( ; (long)points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops.view_update_every) {
- grouping_add(r, value);
+ if(query_plan_should_switch_plan(ops, now_end_time))
+ query_planer_next_plan(&ops, now_end_time, new_point.end_time);
+
+        // read all the points of the db, prior to the time we need (now_end_time)
+
+ size_t count_same_end_time = 0;
+ while(count_same_end_time < 100) {
+ if(likely(count_same_end_time == 0)) {
+ last2_point = last1_point;
+ last1_point = new_point;
}
- // add this value for grouping
- values_in_group++;
+ if(unlikely(ops.is_finished(&ops.handle))) {
+ if(count_same_end_time != 0) {
+ last2_point = last1_point;
+ last1_point = new_point;
+ }
+ new_point = QUERY_POINT_EMPTY;
+ new_point.start_time = last1_point.end_time;
+ new_point.end_time = now_end_time;
+ break;
+ }
+
+ // fetch the new point
+ {
+ STORAGE_POINT sp = ops.next_metric(&ops.handle);
+
+ ops.db_points_read_per_tier[ops.tier]++;
+ ops.db_total_points_read++;
+
+ new_point.start_time = sp.start_time;
+ new_point.end_time = sp.end_time;
+ new_point.anomaly = sp.count ? (NETDATA_DOUBLE)sp.anomaly_count * 100.0 / (NETDATA_DOUBLE)sp.count : 0.0;
+ query_point_set_id(new_point, ops.db_total_points_read);
+
+ // set the right value to the point we got
+ if(likely(!storage_point_is_unset(sp) && !storage_point_is_empty(sp))) {
- if(unlikely(values_in_group == group_size)) {
- rrdr_line = rrdr_line_init(r, now, rrdr_line);
- size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
+ if(unlikely(use_anomaly_bit_as_value))
+ new_point.value = new_point.anomaly;
- if(unlikely(!min_date)) min_date = now;
- max_date = now;
+ else {
+ switch (ops.tier_query_fetch) {
+ default:
+ case TIER_QUERY_FETCH_AVERAGE:
+ new_point.value = sp.sum / sp.count;
+ break;
- // find the place to store our values
- RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
+ case TIER_QUERY_FETCH_MIN:
+ new_point.value = sp.min;
+ break;
- // update the dimension options
- if(likely(values_in_group_non_zero))
- r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+ case TIER_QUERY_FETCH_MAX:
+ new_point.value = sp.max;
+ break;
- // store the specific point options
- *rrdr_value_options_ptr = group_value_flags;
+ case TIER_QUERY_FETCH_SUM:
+ new_point.value = sp.sum;
+ break;
+ };
+ }
+ }
+ else {
+ new_point.value = NAN;
+ new_point.flags = SN_FLAG_NONE;
+ }
+ }
+
+ // check if the db is giving us zero duration points
+ if(unlikely(new_point.start_time == new_point.end_time)) {
+ internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu start time %ld, end time %ld, that are both equal",
+ rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time);
- // store the group value
- calculated_number group_value = grouping_flush(r, rrdr_value_options_ptr);
- r->v[rrdr_o_v_index] = group_value;
+ new_point.start_time = new_point.end_time - ((time_t)ops.tier_ptr->tier_grouping * (time_t)ops.rd->update_every);
+ }
+
+ // check if the db is advancing the query
+ if(unlikely(new_point.end_time <= last1_point.end_time)) {
+ internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, before the last point %zu end time %ld, now is %ld to %ld",
+ rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time,
+ last1_point.id, last1_point.end_time, now_start_time, now_end_time);
+
+ count_same_end_time++;
+ continue;
+ }
+ count_same_end_time = 0;
- if(likely(points_added || dim_id_in_rrdr)) {
- // find the min/max across all dimensions
+ // decide how to use this point
+ if(likely(new_point.end_time < now_end_time)) { // likely to favor tier0
+ // this db point ends before our now_end_time
- if(unlikely(group_value < min)) min = group_value;
- if(unlikely(group_value > max)) max = group_value;
+ if(likely(new_point.end_time >= now_start_time)) { // likely to favor tier0
+ // this db point ends after our now_start time
+ query_add_point_to_group(r, new_point, ops);
}
else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
- // so, on the first point added for the query.
- min = max = group_value;
+ // we don't need this db point
+ // it is totally outside our current time-frame
+
+ // this is desirable for the first point of the query
+ // because it allows us to interpolate the next point
+ // at exactly the time we will want
+
+ // we only log if this is not point 1
+ internal_error(new_point.end_time < after_wanted && new_point.id > 1,
+ "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, which is entirely before our current timeframe %ld to %ld (and before the entire query, after %ld, before %ld)",
+ rd->rrdset->name, rd->name,
+ new_point.id, new_point.start_time, new_point.end_time,
+ now_start_time, now_end_time,
+ after_wanted, before_wanted);
}
- points_added++;
- values_in_group = 0;
- group_value_flags = RRDR_VALUE_NOTHING;
- values_in_group_non_zero = 0;
+ }
+ else {
+ // the point ends in the future
+ // so, we will interpolate it below, at the inner loop
+ break;
}
}
- now = db_now;
+
+ if(unlikely(count_same_end_time)) {
+ internal_error(true,
+ "QUERY: the database does not advance the query, it returned an end time less or equal to the end time of the last point we got %ld, %zu times",
+ last1_point.end_time, count_same_end_time);
+
+ if(unlikely(new_point.end_time <= last1_point.end_time))
+ new_point.end_time = now_end_time;
+ }
+
+ // the inner loop
+ // we have 3 points in memory: last2, last1, new
+ // we select the one to use based on their timestamps
+
+ size_t iterations = 0;
+ for ( ; now_end_time <= new_point.end_time && (long)points_added < points_wanted ;
+ now_end_time += ops.view_update_every, iterations++) {
+
+            // now_start_time is stale inside this loop,
+            // but it is not used here
+
+ QUERY_POINT current_point;
+
+ if(likely(now_end_time > new_point.start_time)) {
+ // it is time for our NEW point to be used
+ current_point = new_point;
+ query_interpolate_point(current_point, last1_point, now_end_time);
+
+ internal_error(current_point.id > 0 && last1_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
+ "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
+ " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point1 in this query.",
+ rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
+ current_point.id, current_point.start_time, current_point.end_time, now_end_time);
+ }
+ else if(likely(now_end_time <= last1_point.end_time)) {
+ // our LAST point is still valid
+ current_point = last1_point;
+ query_interpolate_point(current_point, last2_point, now_end_time);
+
+ internal_error(current_point.id > 0 && last2_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
+ "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
+ " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point2 in this query.",
+ rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
+ current_point.id, current_point.start_time, current_point.end_time, now_end_time);
+ }
+ else {
+ // a GAP, we don't have a value this time
+ current_point = QUERY_POINT_EMPTY;
+ }
+
+ query_add_point_to_group(r, current_point, ops);
+
+ rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
+ size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
+
+ if(unlikely(!min_date)) min_date = now_end_time;
+ max_date = now_end_time;
+
+ // find the place to store our values
+ RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
+
+ // update the dimension options
+ if(likely(ops.group_points_non_zero))
+ r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+
+ // store the specific point options
+ *rrdr_value_options_ptr = ops.group_value_flags;
+
+ // store the group value
+ NETDATA_DOUBLE group_value = ops.grouping_flush(r, rrdr_value_options_ptr);
+ r->v[rrdr_o_v_index] = group_value;
+
+ // we only store uint8_t anomaly rates,
+ // so let's get double precision by storing
+ // anomaly rates in the range 0 - 200
+ r->ar[rrdr_o_v_index] = ops.group_anomaly_rate / (NETDATA_DOUBLE)ops.group_points_added;
+
+ if(likely(points_added || dim_id_in_rrdr)) {
+ // find the min/max across all dimensions
+
+ if(unlikely(group_value < min)) min = group_value;
+ if(unlikely(group_value > max)) max = group_value;
+
+ }
+ else {
+ // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // so, on the first point added for the query.
+ min = max = group_value;
+ }
+
+ points_added++;
+ ops.group_points_added = 0;
+ ops.group_value_flags = RRDR_VALUE_NOTHING;
+ ops.group_points_non_zero = 0;
+ ops.group_anomaly_rate = 0;
+ }
+        // the loop above increased "now_end_time" by view_update_every,
+        // but the main loop will increase it too,
+        // so, let's undo the last iteration of this loop
+ if(iterations)
+ now_end_time -= ops.view_update_every;
}
- rd->state->query_ops.finalize(&handle);
+ ops.finalize(&ops.handle);
- r->internal.db_points_read += db_points_read;
r->internal.result_points_generated += points_added;
+ r->internal.db_points_read += ops.db_total_points_read;
+ for(int tr = 0; tr < storage_tiers ; tr++)
+ r->internal.tier_points_read[tr] += ops.db_points_read_per_tier[tr];
r->min = min;
r->max = max;
r->before = max_date;
- r->after = min_date - (r->group - 1) * dt;
+ r->after = min_date - ops.view_update_every + ops.query_granularity;
rrdr_done(r, rrdr_line);
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(r->rows != points_added))
- error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
-#endif
+ internal_error((long)points_added != points_wanted,
+ "QUERY: query on %s/%s requested %zu points, but RRDR added %zu (%zu db points read).",
+ r->st->name, rd->name, (size_t)points_wanted, (size_t)points_added, ops.db_total_points_read);
+}
+
+// ----------------------------------------------------------------------------
+// fill the gap of a tier
+
+extern void store_metric_at_tier(RRDDIM *rd, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
+
+void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, int tier, time_t now) {
+ if(unlikely(tier < 0 || tier >= storage_tiers)) return;
+ if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;
+
+ struct rrddim_tier *t = rd->tiers[tier];
+ if(unlikely(!t)) return;
+
+ time_t latest_time_t = t->query_ops.latest_time(t->db_metric_handle);
+ time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
+ time_t time_diff = now - latest_time_t;
+
+ // if the user wants only NEW backfilling, and we don't have any data
+ if(storage_tiers_backfill[tier] == RRD_BACKFILL_NEW && latest_time_t <= 0) return;
+
+ // there is really nothing we can do
+ if(now <= latest_time_t || time_diff < granularity) return;
+
+ struct rrddim_query_handle handle;
+
+ size_t all_points_read = 0;
+
+ // for each lower tier
+ for(int tr = tier - 1; tr >= 0 ;tr--){
+ time_t smaller_tier_first_time = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
+ time_t smaller_tier_last_time = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
+        if(smaller_tier_last_time <= latest_time_t) continue; // it has nothing newer than we already have
+
+ long after_wanted = (latest_time_t < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_t;
+ long before_wanted = smaller_tier_last_time;
+
+ struct rrddim_tier *tmp = rd->tiers[tr];
+ tmp->query_ops.init(tmp->db_metric_handle, &handle, after_wanted, before_wanted, TIER_QUERY_FETCH_AVERAGE);
+
+ size_t points = 0;
+
+ while(!tmp->query_ops.is_finished(&handle)) {
+
+ STORAGE_POINT sp = tmp->query_ops.next_metric(&handle);
+
+ if(sp.end_time > latest_time_t) {
+ latest_time_t = sp.end_time;
+ store_metric_at_tier(rd, t, sp, sp.end_time * USEC_PER_SEC);
+ points++;
+ }
+ }
+
+ all_points_read += points;
+ tmp->query_ops.finalize(&handle);
+
+ //internal_error(true, "DBENGINE: backfilled chart '%s', dimension '%s', tier %d, from %ld to %ld, with %zu points from tier %d",
+ // rd->rrdset->name, rd->name, tier, after_wanted, before_wanted, points, tr);
+ }
+
+ rrdr_query_completed(all_points_read, all_points_read);
}
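+// Example (illustrative): a tier with tier_grouping 60 on a 1-second chart has
+// a granularity of 60 seconds, so it is backfilled only when it is at least
+// one full point behind 'now'; the lower tiers are then queried (closest
+// first) with TIER_QUERY_FETCH_AVERAGE, and every point newer than the tier's
+// latest time is stored into it.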
// ----------------------------------------------------------------------------
@@ -719,8 +1494,9 @@ static inline void do_dimension_fixedstep(
#ifdef NETDATA_INTERNAL_CHECKS
static void rrd2rrdr_log_request_response_metadata(RRDR *r
+ , RRDR_OPTIONS options __maybe_unused
, RRDR_GROUPING group_method
- , int aligned
+ , bool aligned
, long group
, long resampling_time
, long resampling_group
@@ -736,9 +1512,9 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
) {
netdata_rwlock_rdlock(&r->st->rrdset_rwlock);
info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, resampling_time: %ld, resampling_group: %ld), "
- "after (got: %zu, want: %zu, req: %zu, db: %zu), "
- "before (got: %zu, want: %zu, req: %zu, db: %zu), "
- "duration (got: %zu, want: %zu, req: %zu, db: %zu), "
+ "after (got: %zu, want: %zu, req: %ld, db: %zu), "
+ "before (got: %zu, want: %zu, req: %ld, db: %zu), "
+ "duration (got: %zu, want: %zu, req: %ld, db: %zu), "
//"slot (after: %zu, before: %zu, delta: %zu), "
"points (got: %ld, want: %ld, req: %ld, db: %ld), "
"%s"
@@ -755,19 +1531,19 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
// after
, (size_t)r->after
, (size_t)after_wanted
- , (size_t)after_requested
+ , after_requested
, (size_t)rrdset_first_entry_t_nolock(r->st)
// before
, (size_t)r->before
, (size_t)before_wanted
- , (size_t)before_requested
+ , before_requested
, (size_t)rrdset_last_entry_t_nolock(r->st)
// duration
, (size_t)(r->before - r->after + r->st->update_every)
, (size_t)(before_wanted - after_wanted + r->st->update_every)
- , (size_t)(before_requested - after_requested)
+ , before_requested - after_requested
, (size_t)((rrdset_last_entry_t_nolock(r->st) - rrdset_first_entry_t_nolock(r->st)) + r->st->update_every)
// slot
@@ -791,79 +1567,88 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
#endif // NETDATA_INTERNAL_CHECKS
// Returns 1 if an absolute period was requested or 0 if it was a relative period
-static int rrdr_convert_before_after_to_absolute(
- long long *after_requestedp
- , long long *before_requestedp
- , int update_every
- , time_t first_entry_t
- , time_t last_entry_t
- , RRDR_OPTIONS options
-) {
+int rrdr_relative_window_to_absolute(long long *after, long long *before) {
+ time_t now = now_realtime_sec() - 1;
+
int absolute_period_requested = -1;
long long after_requested, before_requested;
- before_requested = *before_requestedp;
- after_requested = *after_requestedp;
-
- if(before_requested == 0 && after_requested == 0) {
- // dump the all the data
- before_requested = last_entry_t;
- after_requested = first_entry_t;
- absolute_period_requested = 0;
- }
+ before_requested = *before;
+ after_requested = *after;
// allow relative for before (smaller than API_RELATIVE_TIME_MAX)
if(ABS(before_requested) <= API_RELATIVE_TIME_MAX) {
- if(ABS(before_requested) % update_every) {
- // make sure it is multiple of st->update_every
- if(before_requested < 0) before_requested = before_requested - update_every -
- before_requested % update_every;
- else before_requested = before_requested + update_every - before_requested % update_every;
- }
- if(before_requested > 0) before_requested = first_entry_t + before_requested;
- else before_requested = last_entry_t + before_requested; //last_entry_t is not really now_t
- //TODO: fix before_requested to be relative to now_t
+ // if the user asked for a positive relative time,
+ // flip it to a negative
+ if(before_requested > 0)
+ before_requested = -before_requested;
+
+ before_requested = now + before_requested;
absolute_period_requested = 0;
}
// allow relative for after (smaller than API_RELATIVE_TIME_MAX)
if(ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
- if(after_requested == 0) after_requested = -update_every;
- if(ABS(after_requested) % update_every) {
- // make sure it is multiple of st->update_every
- if(after_requested < 0) after_requested = after_requested - update_every - after_requested % update_every;
- else after_requested = after_requested + update_every - after_requested % update_every;
- }
- after_requested = before_requested + after_requested;
+ if(after_requested > 0)
+ after_requested = -after_requested;
+
+        // if the user didn't give an after, default to the last
+        // 600 seconds, matching the default number of points
+ if(after_requested == 0)
+ after_requested = -600;
+
+ // since the query engine now returns inclusive timestamps
+ // it is awkward to return 6 points when after=-5 is given
+ // so for relative queries we add 1 second, to give
+ // more predictable results to users.
+ after_requested = before_requested + after_requested + 1;
absolute_period_requested = 0;
}
if(absolute_period_requested == -1)
absolute_period_requested = 1;
- // make sure they are within our timeframe
- if(before_requested > last_entry_t) before_requested = last_entry_t;
- if(before_requested < first_entry_t && !(options & RRDR_OPTION_ALLOW_PAST))
- before_requested = first_entry_t;
-
- if(after_requested > last_entry_t) after_requested = last_entry_t;
- if(after_requested < first_entry_t && !(options & RRDR_OPTION_ALLOW_PAST))
- after_requested = first_entry_t;
-
- // check if they are reversed
+ // check if the parameters are flipped
if(after_requested > before_requested) {
- time_t tmp = before_requested;
+ long long t = before_requested;
before_requested = after_requested;
- after_requested = tmp;
+ after_requested = t;
+ }
+
+ // if the query requests future data
+ // shift the query back to be in the present time
+ // (this may also happen because of the rules above)
+ if(before_requested > now) {
+ long long delta = before_requested - now;
+ before_requested -= delta;
+ after_requested -= delta;
}
- *before_requestedp = before_requested;
- *after_requestedp = after_requested;
+ *before = before_requested;
+ *after = after_requested;
return absolute_period_requested;
}
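+// Example (illustrative): with now = 1000000, a request of after=-600,
+// before=0 becomes before = 1000000 and after = 1000000 - 600 + 1 = 999401,
+// i.e. an inclusive window of exactly 600 seconds.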
-static RRDR *rrd2rrdr_fixedstep(
+// #define DEBUG_QUERY_LOGIC 1
+
+#ifdef DEBUG_QUERY_LOGIC
+#define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
+#define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
+#define query_debug_log_fin() { \
+ info("QUERY: chart '%s', after:%lld, before:%lld, duration:%lld, points:%ld, res:%ld - wanted => after:%lld, before:%lld, points:%ld, group:%ld, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", st->name, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
+ buffer_free(debug_log); \
+ debug_log = NULL; \
+ }
+#define query_debug_log_free() do { buffer_free(debug_log); } while(0)
+#else
+#define query_debug_log_init() debug_dummy()
+#define query_debug_log(args...) debug_dummy()
+#define query_debug_log_fin() debug_dummy()
+#define query_debug_log_free() debug_dummy()
+#endif
+
+RRDR *rrd2rrdr(
ONEWAYALLOC *owa
, RRDSET *st
, long points_requested
@@ -873,564 +1658,272 @@ static RRDR *rrd2rrdr_fixedstep(
, long resampling_time_requested
, RRDR_OPTIONS options
, const char *dimensions
- , int update_every
- , time_t first_entry_t
- , time_t last_entry_t
- , int absolute_period_requested
, struct context_param *context_param_list
+ , const char *group_options
, int timeout
+ , int tier
) {
- int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
-
- // the duration of the chart
- time_t duration = before_requested - after_requested;
- long available_points = duration / update_every;
-
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
-
- if(duration <= 0 || available_points <= 0)
- return rrdr_create(owa, st, 1, context_param_list);
-
- // check the number of wanted points in the result
- if(unlikely(points_requested < 0)) points_requested = -points_requested;
- if(unlikely(points_requested > available_points)) points_requested = available_points;
- if(unlikely(points_requested == 0)) points_requested = available_points;
-
- // calculate the desired grouping of source data points
- long group = available_points / points_requested;
- if(unlikely(group <= 0)) group = 1;
- if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
-
- // resampling_time_requested enforces a certain grouping multiple
- calculated_number resampling_divisor = 1.0;
- long resampling_group = 1;
- if(unlikely(resampling_time_requested > update_every)) {
- if (unlikely(resampling_time_requested > duration)) {
- // group_time is above the available duration
-
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
- #endif
-
- after_requested = before_requested - resampling_time_requested;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
-
- // if the duration is not aligned to resampling time
- // extend the duration to the past, to avoid a gap at the chart
- // only when the missing duration is above 1/10th of a point
- if(duration % resampling_time_requested) {
- time_t delta = duration % resampling_time_requested;
- if(delta > resampling_time_requested / 10) {
- after_requested -= resampling_time_requested - delta;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
- }
-
- // the points we should group to satisfy gtime
- resampling_group = resampling_time_requested / update_every;
- if(unlikely(resampling_time_requested % update_every)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
- #endif
-
- resampling_group++;
- }
-
- // adapt group according to resampling_group
- if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
- if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
-
- //resampling_divisor = group / resampling_group;
- resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
- }
-
- // now that we have group,
- // align the requested timeframe to fit it.
-
- if(aligned) {
- // alignment has been requested, so align the values
- before_requested -= before_requested % (group * update_every);
- after_requested -= after_requested % (group * update_every);
- }
-
- // we align the request on requested_before
- time_t before_wanted = before_requested;
- if(likely(before_wanted > last_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
- #endif
-
- before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
- }
- //size_t before_slot = rrdset_time2slot(st, before_wanted);
-
- // we need to estimate the number of points, for having
- // an integer number of values per point
- long points_wanted = (before_wanted - after_requested) / (update_every * group);
-
- time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
- if(unlikely(after_wanted < first_entry_t)) {
- // hm... we go to the past, calculate again points_wanted using all the db from before_wanted to the beginning
- points_wanted = (before_wanted - first_entry_t) / group;
-
- // recalculate after wanted with the new number of points
- after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
-
- if(unlikely(after_wanted < first_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
- #endif
-
- after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
- }
- }
- //size_t after_slot = rrdset_time2slot(st, after_wanted);
-
- // check if they are reversed
- if(unlikely(after_wanted > before_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
- #endif
- time_t tmp = before_wanted;
- before_wanted = after_wanted;
- after_wanted = tmp;
+ // RULES
+ // points_requested = 0
+ // the user wants all the natural points the database has
+ //
+ // after_requested = 0
+ // the user wants to start the query from the oldest point in our database
+ //
+ // before_requested = 0
+    // the user wants the query to end at the latest point in our database
+ //
+ // when natural points are wanted, the query has to be aligned to the update_every
+ // of the database
+
+ long points_wanted = points_requested;
+ long long after_wanted = after_requested;
+ long long before_wanted = before_requested;
+ int update_every = st->update_every;
+
+ bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+ bool automatic_natural_points = (points_wanted == 0);
+ bool relative_period_requested = false;
+ bool natural_points = (options & RRDR_OPTION_NATURAL_POINTS) || automatic_natural_points;
+ bool before_is_aligned_to_db_end = false;
+
+ query_debug_log_init();
+
+ // make sure points_wanted is positive
+ if(points_wanted < 0) {
+ points_wanted = -points_wanted;
+ query_debug_log(":-points_wanted %ld", points_wanted);
}
- // recalculate points_wanted using the final time-frame
- points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
- if(unlikely(points_wanted < 0)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
- #endif
- points_wanted = 0;
+ if(ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
+ relative_period_requested = true;
+ natural_points = true;
+ options |= RRDR_OPTION_NATURAL_POINTS;
+ query_debug_log(":relative+natural");
}
-#ifdef NETDATA_INTERNAL_CHECKS
- duration = before_wanted - after_wanted;
-
- if(after_wanted < first_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
-
- if(after_wanted > last_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
-
- if(before_wanted < first_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
-
- if(before_wanted > last_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
-
-/*
- if(before_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
+ // if the user wants virtual points, make sure we do it
+ if(options & RRDR_OPTION_VIRTUAL_POINTS)
+ natural_points = false;
- if(after_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
-*/
+ // set the right flag about natural and virtual points
+ if(natural_points) {
+ options |= RRDR_OPTION_NATURAL_POINTS;
- if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
- error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
-
- if(group < resampling_group)
- error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
-
- if(group > resampling_group && group % resampling_group)
- error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group);
-#endif
-
- // -------------------------------------------------------------------------
- // initialize our result set
- // this also locks the chart for us
-
- RRDR *r = rrdr_create(owa, st, points_wanted, context_param_list);
- if(unlikely(!r)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
- #endif
- return NULL;
+ if(options & RRDR_OPTION_VIRTUAL_POINTS)
+ options &= ~RRDR_OPTION_VIRTUAL_POINTS;
}
+ else {
+ options |= RRDR_OPTION_VIRTUAL_POINTS;
- if(unlikely(!r->d || !points_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
- #endif
- return r;
+ if(options & RRDR_OPTION_NATURAL_POINTS)
+ options &= ~RRDR_OPTION_NATURAL_POINTS;
}
- if(unlikely(absolute_period_requested == 1))
- r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
- else
- r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
-
- // find how many dimensions we have
- long dimensions_count = r->d;
-
- // -------------------------------------------------------------------------
- // initialize RRDR
-
- r->group = group;
- r->update_every = (int)group * update_every;
- r->before = before_wanted;
- r->after = after_wanted;
- r->internal.points_wanted = points_wanted;
- r->internal.resampling_group = resampling_group;
- r->internal.resampling_divisor = resampling_divisor;
+ if(after_wanted == 0 || before_wanted == 0) {
+ // for non-context queries we have to find the duration of the database
+ // for context queries we will assume 600 seconds duration
+ if(!context_param_list) {
+ relative_period_requested = true;
- // -------------------------------------------------------------------------
- // assign the processor functions
+ rrdset_rdlock(st);
+ time_t first_entry_t = rrdset_first_entry_t_nolock(st);
+ time_t last_entry_t = rrdset_last_entry_t_nolock(st);
+ rrdset_unlock(st);
- {
- int i, found = 0;
- for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
- if(api_v1_data_groups[i].value == group_method) {
- r->internal.grouping_create= api_v1_data_groups[i].create;
- r->internal.grouping_reset = api_v1_data_groups[i].reset;
- r->internal.grouping_free = api_v1_data_groups[i].free;
- r->internal.grouping_add = api_v1_data_groups[i].add;
- r->internal.grouping_flush = api_v1_data_groups[i].flush;
- found = 1;
+ if(first_entry_t == 0 || last_entry_t == 0) {
+ internal_error(true, "QUERY: chart without data detected on '%s'", st->name);
+ query_debug_log_free();
+ return NULL;
}
- }
- if(!found) {
- errno = 0;
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
- #endif
- r->internal.grouping_create= grouping_create_average;
- r->internal.grouping_reset = grouping_reset_average;
- r->internal.grouping_free = grouping_free_average;
- r->internal.grouping_add = grouping_add_average;
- r->internal.grouping_flush = grouping_flush_average;
- }
- }
-
- // allocate any memory required by the grouping method
- r->internal.grouping_create(r);
-
-
- // -------------------------------------------------------------------------
- // disable the not-wanted dimensions
-
- if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
- rrdset_check_rdlock(st);
-
- if(dimensions)
- rrdr_disable_not_selected_dimensions(r, options, dimensions, context_param_list);
-
-
- // -------------------------------------------------------------------------
- // do the work for each dimension
-
- time_t max_after = 0, min_before = 0;
- long max_rows = 0;
-
- RRDDIM *rd;
- long c, dimensions_used = 0, dimensions_nonzero = 0;
- struct timeval query_start_time;
- struct timeval query_current_time;
- if (timeout)
- now_realtime_timeval(&query_start_time);
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
-
- // if we need a percentage, we need to calculate all dimensions
- if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
- continue;
- }
- r->od[c] |= RRDR_DIMENSION_SELECTED;
-
- // reset the grouping for the new dimension
- r->internal.grouping_reset(r);
-
- do_dimension_fixedstep(
- r
- , points_wanted
- , rd
- , c
- , after_wanted
- , before_wanted
- , options
- );
- if (timeout)
- now_realtime_timeval(&query_current_time);
- if(r->od[c] & RRDR_DIMENSION_NONZERO)
- dimensions_nonzero++;
+ query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_t, last_entry_t);
- // verify all dimensions are aligned
- if(unlikely(!dimensions_used)) {
- min_before = r->before;
- max_after = r->after;
- max_rows = r->rows;
- }
- else {
- if(r->after != max_after) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_after, rd->name, (size_t)r->after);
- #endif
- r->after = (r->after > max_after) ? r->after : max_after;
+ if (after_wanted == 0) {
+ after_wanted = first_entry_t;
+ query_debug_log(":zero after_wanted %lld", after_wanted);
}
- if(r->before != min_before) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)min_before, rd->name, (size_t)r->before);
- #endif
- r->before = (r->before < min_before) ? r->before : min_before;
+ if (before_wanted == 0) {
+ before_wanted = last_entry_t;
+ before_is_aligned_to_db_end = true;
+ query_debug_log(":zero before_wanted %lld", before_wanted);
}
- if(r->rows != max_rows) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
- #endif
- r->rows = (r->rows > max_rows) ? r->rows : max_rows;
+ if(points_wanted == 0) {
+ points_wanted = (last_entry_t - first_entry_t) / update_every;
+ query_debug_log(":zero points_wanted %ld", points_wanted);
}
}
- dimensions_used++;
- if (timeout && (dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
- log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %d ms)",
- dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
- r->result_options |= RRDR_RESULT_OPTION_CANCEL;
- break;
+ // if they are still zero, assume 600
+
+ if(after_wanted == 0) {
+ after_wanted = -600;
+ query_debug_log(":zero600 after_wanted %lld", after_wanted);
}
}
- #ifdef NETDATA_INTERNAL_CHECKS
- if (dimensions_used) {
- if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
-
- if(r->rows != points_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
-
- if(aligned && (r->before % group) != 0)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+ if(points_wanted == 0) {
+ points_wanted = 600;
+ query_debug_log(":zero600 points_wanted %ld", points_wanted);
+ }
- // 'after' should not be aligned, since we start inside the first group
- //if(aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+ // convert our before_wanted and after_wanted to absolute
+ rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
+ query_debug_log(":relative2absolute after %lld, before %lld", after_wanted, before_wanted);
- if(r->before != before_requested)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+ if(natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
+ update_every = rrdset_find_natural_update_every_for_timeframe(st, after_wanted, before_wanted, points_wanted, options, tier);
+ if(update_every <= 0) update_every = st->update_every;
+ query_debug_log(":natural update every %d", update_every);
+ }
- if(r->before != before_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+ // this is the update_every of the query
+ // it may be different to the update_every of the database
+ time_t query_granularity = (natural_points)?update_every:1;
+ if(query_granularity <= 0) query_granularity = 1;
+ query_debug_log(":query_granularity %ld", query_granularity);
- // reported 'after' varies, depending on group
- if(r->after != after_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ // align before_wanted and after_wanted to query_granularity
+ if (before_wanted % query_granularity) {
+ before_wanted -= before_wanted % query_granularity;
+ query_debug_log(":granularity align before_wanted %lld", before_wanted);
}
- #endif
- // free all resources used by the grouping method
- r->internal.grouping_free(r);
+ if (after_wanted % query_granularity) {
+ after_wanted -= after_wanted % query_granularity;
+ query_debug_log(":granularity align after_wanted %lld", after_wanted);
+ }
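+    // Example (illustrative): with query_granularity 10, before_wanted 1005 and
+    // after_wanted 123 are aligned down to 1000 and 120 respectively.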
- // when all the dimensions are zero, we should return all of them
- if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
- // all the dimensions are zero
- // mark them as NONZERO to send them all
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- r->od[c] |= RRDR_DIMENSION_NONZERO;
- }
+ // automatic_natural_points is set when the user wants all the points available in the database
+ if(automatic_natural_points) {
+ points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
+ if(unlikely(points_wanted <= 0)) points_wanted = 1;
+ query_debug_log(":auto natural points_wanted %ld", points_wanted);
}
- rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
- return r;
-}
+ time_t duration = before_wanted - after_wanted;
-#ifdef ENABLE_DBENGINE
-static RRDR *rrd2rrdr_variablestep(
- ONEWAYALLOC *owa
- , RRDSET *st
- , long points_requested
- , long long after_requested
- , long long before_requested
- , RRDR_GROUPING group_method
- , long resampling_time_requested
- , RRDR_OPTIONS options
- , const char *dimensions
- , int update_every
- , time_t first_entry_t
- , time_t last_entry_t
- , int absolute_period_requested
- , struct rrdeng_region_info *region_info_array
- , struct context_param *context_param_list
- , int timeout
-) {
- int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+ // if the resampling time is too big, extend the duration to the past
+ if (unlikely(resampling_time_requested > duration)) {
+ after_wanted = before_wanted - resampling_time_requested;
+ duration = before_wanted - after_wanted;
+ query_debug_log(":resampling after_wanted %lld", after_wanted);
+ }
- // the duration of the chart
- time_t duration = before_requested - after_requested;
- long available_points = duration / update_every;
+ // if the duration is not aligned to resampling time
+ // extend the duration to the past, to avoid a gap at the chart
+ // only when the missing duration is above 1/10th of a point
+ if(resampling_time_requested > query_granularity && duration % resampling_time_requested) {
+ time_t delta = duration % resampling_time_requested;
+ if(delta > resampling_time_requested / 10) {
+ after_wanted -= resampling_time_requested - delta;
+ duration = before_wanted - after_wanted;
+ query_debug_log(":resampling2 after_wanted %lld", after_wanted);
+ }
+ }
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
+ // the available points of the query
+ long points_available = (duration + 1) / query_granularity;
+ if(unlikely(points_available <= 0)) points_available = 1;
+ query_debug_log(":points_available %ld", points_available);
- if(duration <= 0 || available_points <= 0) {
- freez(region_info_array);
- return rrdr_create(owa, st, 1, context_param_list);
+ if(points_wanted > points_available) {
+ points_wanted = points_available;
+ query_debug_log(":max points_wanted %ld", points_wanted);
}
- // check the number of wanted points in the result
- if(unlikely(points_requested < 0)) points_requested = -points_requested;
- if(unlikely(points_requested > available_points)) points_requested = available_points;
- if(unlikely(points_requested == 0)) points_requested = available_points;
-
// calculate the desired grouping of source data points
- long group = available_points / points_requested;
- if(unlikely(group <= 0)) group = 1;
- if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
-
- // resampling_time_requested enforces a certain grouping multiple
- calculated_number resampling_divisor = 1.0;
- long resampling_group = 1;
- if(unlikely(resampling_time_requested > update_every)) {
- if (unlikely(resampling_time_requested > duration)) {
- // group_time is above the available duration
-
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
- #endif
-
- after_requested = before_requested - resampling_time_requested;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
-
- // if the duration is not aligned to resampling time
- // extend the duration to the past, to avoid a gap at the chart
- // only when the missing duration is above 1/10th of a point
- if(duration % resampling_time_requested) {
- time_t delta = duration % resampling_time_requested;
- if(delta > resampling_time_requested / 10) {
- after_requested -= resampling_time_requested - delta;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
- }
+ long group = points_available / points_wanted;
+ if(group <= 0) group = 1;
- // the points we should group to satisfy gtime
- resampling_group = resampling_time_requested / update_every;
- if(unlikely(resampling_time_requested % update_every)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
- #endif
-
- resampling_group++;
- }
+ // round "group" to the closest integer
+ if(points_available % points_wanted > points_wanted / 2)
+ group++;
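+ // e.g. 3601 points available for 1000 wanted gives group 3, but the remainder 601 > 500, so group becomes 4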
- // adapt group according to resampling_group
- if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
- if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+ query_debug_log(":group %ld", group);
- //resampling_divisor = group / resampling_group;
- resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
- }
+ if(points_wanted * group * query_granularity < duration) {
+ // the grouping we are going to do is not enough
+ // to cover the entire duration requested, so
+ // we have to change the number of points, to make sure we will
+ // respect the timeframe as closely as possible
- // now that we have group,
- // align the requested timeframe to fit it.
+ // let's see how many points are the optimal
+ points_wanted = points_available / group;
- if(aligned) {
- // alignment has been requested, so align the values
- before_requested -= before_requested % (group * update_every);
- after_requested -= after_requested % (group * update_every);
- }
+ if(points_wanted * group < points_available)
+ points_wanted++;
- // we align the request on requested_before
- time_t before_wanted = before_requested;
- if(likely(before_wanted > last_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
- #endif
+ if(unlikely(points_wanted <= 0))
+ points_wanted = 1;
- before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
+ query_debug_log(":optimal points %ld", points_wanted);
}
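+ // e.g. a 3600s window at 1s granularity with 500 points wanted: points_available is 3601, group is 7,
+ // and points_wanted is re-derived to 515 so that 515 * 7 points cover the whole window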
- //size_t before_slot = rrdset_time2slot(st, before_wanted);
-
- // we need to estimate the number of points, for having
- // an integer number of values per point
- long points_wanted = (before_wanted - after_requested) / (update_every * group);
-
- time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
- if(unlikely(after_wanted < first_entry_t)) {
- // hm... we go to the past, calculate again points_wanted using all the db from before_wanted to the beginning
- points_wanted = (before_wanted - first_entry_t) / group;
- // recalculate after wanted with the new number of points
- after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
+ // resampling_time_requested enforces a certain grouping multiple
+ NETDATA_DOUBLE resampling_divisor = 1.0;
+ long resampling_group = 1;
+ if(unlikely(resampling_time_requested > query_granularity)) {
+ // the points we should group to satisfy gtime
+ resampling_group = resampling_time_requested / query_granularity;
+ if(unlikely(resampling_time_requested % query_granularity))
+ resampling_group++;
- if(unlikely(after_wanted < first_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
- #endif
+ query_debug_log(":resampling group %ld", resampling_group);
- after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
+ // adapt group according to resampling_group
+ if(unlikely(group < resampling_group)) {
+ group = resampling_group; // do not allow grouping below the desired one
+ query_debug_log(":group less res %ld", group);
+ }
+ if(unlikely(group % resampling_group)) {
+ group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+ query_debug_log(":group mod res %ld", group);
}
- }
- //size_t after_slot = rrdset_time2slot(st, after_wanted);
-
- // check if they are reversed
- if(unlikely(after_wanted > before_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
- #endif
- time_t tmp = before_wanted;
- before_wanted = after_wanted;
- after_wanted = tmp;
- }
- // recalculate points_wanted using the final time-frame
- points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
- if(unlikely(points_wanted < 0)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
- #endif
- points_wanted = 0;
+ // resampling_divisor = group / resampling_group;
+ resampling_divisor = (NETDATA_DOUBLE)(group * query_granularity) / (NETDATA_DOUBLE)resampling_time_requested;
+ query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
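+ // e.g. gtime 60s at 1s granularity gives resampling_group 60; with group 60 the divisor is 1.0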
}
-#ifdef NETDATA_INTERNAL_CHECKS
- duration = before_wanted - after_wanted;
-
- if(after_wanted < first_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
-
- if(after_wanted > last_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
+ // now that we have group, align the requested timeframe to fit it.
+ if(aligned && before_wanted % (group * query_granularity)) {
+ if(before_is_aligned_to_db_end)
+ before_wanted -= before_wanted % (group * query_granularity);
+ else
+ before_wanted += (group * query_granularity) - before_wanted % (group * query_granularity);
+ query_debug_log(":align before_wanted %lld", before_wanted);
+ }
- if(before_wanted < first_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
+ after_wanted = before_wanted - (points_wanted * group * query_granularity) + query_granularity;
+ query_debug_log(":final after_wanted %lld", after_wanted);
- if(before_wanted > last_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
+ duration = before_wanted - after_wanted;
+ query_debug_log(":final duration %ld", duration + 1);
-/*
- if(before_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
+ // check the context query based on the starting time of the query
+ if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
+ rebuild_context_param_list(owa, context_param_list, after_wanted);
+ st = context_param_list->rd ? context_param_list->rd->rrdset : NULL;
- if(after_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
-*/
+ if(unlikely(!st))
+ return NULL;
+ }
- if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
- error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
+ internal_error(points_wanted != duration / (query_granularity * group) + 1,
+ "QUERY: points_wanted %ld is not points %ld",
+ points_wanted, duration / (query_granularity * group) + 1);
- if(group < resampling_group)
- error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
+ internal_error(group < resampling_group,
+ "QUERY: group %ld is less than the desired group points %ld",
+ group, resampling_group);
- if(group > resampling_group && group % resampling_group)
- error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group);
-#endif
+ internal_error(group > resampling_group && group % resampling_group,
+ "QUERY: group %ld is not a multiple of the desired group points %ld",
+ group, resampling_group);
// -------------------------------------------------------------------------
// initialize our result set
@@ -1438,26 +1931,21 @@ static RRDR *rrd2rrdr_variablestep(
RRDR *r = rrdr_create(owa, st, points_wanted, context_param_list);
if(unlikely(!r)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
- #endif
- freez(region_info_array);
+ internal_error(true, "QUERY: cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld",
+ st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
return NULL;
}
if(unlikely(!r->d || !points_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
- #endif
- freez(region_info_array);
+ internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld",
+ st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
return r;
}
- r->result_options |= RRDR_RESULT_OPTION_VARIABLE_STEP;
- if(unlikely(absolute_period_requested == 1))
- r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
- else
+ if(relative_period_requested)
r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
+ else
+ r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
// find how many dimensions we have
long dimensions_count = r->d;
@@ -1466,48 +1954,26 @@ static RRDR *rrd2rrdr_variablestep(
// initialize RRDR
r->group = group;
- r->update_every = (int)group * update_every;
+ r->update_every = (int)(group * query_granularity);
r->before = before_wanted;
r->after = after_wanted;
r->internal.points_wanted = points_wanted;
r->internal.resampling_group = resampling_group;
r->internal.resampling_divisor = resampling_divisor;
-
+ r->internal.query_options = options;
+ r->internal.query_tier = tier;
// -------------------------------------------------------------------------
// assign the processor functions
-
- {
- int i, found = 0;
- for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
- if(api_v1_data_groups[i].value == group_method) {
- r->internal.grouping_create= api_v1_data_groups[i].create;
- r->internal.grouping_reset = api_v1_data_groups[i].reset;
- r->internal.grouping_free = api_v1_data_groups[i].free;
- r->internal.grouping_add = api_v1_data_groups[i].add;
- r->internal.grouping_flush = api_v1_data_groups[i].flush;
- found = 1;
- }
- }
- if(!found) {
- errno = 0;
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
- #endif
- r->internal.grouping_create= grouping_create_average;
- r->internal.grouping_reset = grouping_reset_average;
- r->internal.grouping_free = grouping_free_average;
- r->internal.grouping_add = grouping_add_average;
- r->internal.grouping_flush = grouping_flush_average;
- }
- }
+ rrdr_set_grouping_function(r, group_method);
// allocate any memory required by the grouping method
- r->internal.grouping_create(r);
+ r->internal.grouping_create(r, group_options);
// -------------------------------------------------------------------------
// disable the not-wanted dimensions
+
if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
rrdset_check_rdlock(st);
@@ -1515,19 +1981,22 @@ static RRDR *rrd2rrdr_variablestep(
rrdr_disable_not_selected_dimensions(r, options, dimensions, context_param_list);
+ query_debug_log_fin();
+
// -------------------------------------------------------------------------
// do the work for each dimension
time_t max_after = 0, min_before = 0;
long max_rows = 0;
+ RRDDIM *first_rd = context_param_list ? context_param_list->rd : st->dimensions;
RRDDIM *rd;
long c, dimensions_used = 0, dimensions_nonzero = 0;
struct timeval query_start_time;
struct timeval query_current_time;
- if (timeout)
- now_realtime_timeval(&query_start_time);
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ if (timeout) now_realtime_timeval(&query_start_time);
+
+ for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
// if we need a percentage, we need to calculate all dimensions
if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
@@ -1539,15 +2008,7 @@ static RRDR *rrd2rrdr_variablestep(
// reset the grouping for the new dimension
r->internal.grouping_reset(r);
- do_dimension_variablestep(
- r
- , points_wanted
- , rd
- , c
- , after_wanted
- , before_wanted
- , options
- );
+ rrd2rrdr_do_dimension(r, points_wanted, rd, c, after_wanted, before_wanted);
if (timeout)
now_realtime_timeval(&query_current_time);
@@ -1562,66 +2023,81 @@ static RRDR *rrd2rrdr_variablestep(
}
else {
if(r->after != max_after) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_after, rd->name, (size_t)r->after);
- #endif
+ internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_after, rd->name, (size_t)r->after);
+
r->after = (r->after > max_after) ? r->after : max_after;
}
if(r->before != min_before) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)min_before, rd->name, (size_t)r->before);
- #endif
+ internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)min_before, rd->name, (size_t)r->before);
+
r->before = (r->before < min_before) ? r->before : min_before;
}
if(r->rows != max_rows) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
- #endif
+ internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
+
r->rows = (r->rows > max_rows) ? r->rows : max_rows;
}
}
dimensions_used++;
- if (timeout && (dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
+ if (timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %d ms)",
- dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
+ (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
r->result_options |= RRDR_RESULT_OPTION_CANCEL;
break;
}
}
- #ifdef NETDATA_INTERNAL_CHECKS
-
+#ifdef NETDATA_INTERNAL_CHECKS
if (dimensions_used) {
if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ r->internal.log);
if(r->rows != points_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "got 'points' is not wanted 'points'");
- if(aligned && (r->before % group) != 0)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+ if(aligned && (r->before % (group * query_granularity)) != 0)
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "'before' is not aligned but alignment is required");
// 'after' should not be aligned, since we start inside the first group
//if(aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+ // rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
- if(r->before != before_requested)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+ if(r->before != before_wanted)
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "chart is not aligned to requested 'before'");
if(r->before != before_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "got 'before' is not wanted 'before'");
// reported 'after' varies, depending on group
if(r->after != after_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "got 'after' is not wanted 'after'");
+
}
- #endif
+#endif
// free all resources used by the grouping method
r->internal.grouping_free(r);
@@ -1630,99 +2106,12 @@ static RRDR *rrd2rrdr_variablestep(
if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
// all the dimensions are zero
// mark them as NONZERO to send them all
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
r->od[c] |= RRDR_DIMENSION_NONZERO;
}
}
rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
- freez(region_info_array);
return r;
}
-#endif //#ifdef ENABLE_DBENGINE
-
-RRDR *rrd2rrdr(
- ONEWAYALLOC *owa
- , RRDSET *st
- , long points_requested
- , long long after_requested
- , long long before_requested
- , RRDR_GROUPING group_method
- , long resampling_time_requested
- , RRDR_OPTIONS options
- , const char *dimensions
- , struct context_param *context_param_list
- , int timeout
-)
-{
- int rrd_update_every;
- int absolute_period_requested;
-
- time_t first_entry_t;
- time_t last_entry_t;
- if (context_param_list) {
- first_entry_t = context_param_list->first_entry_t;
- last_entry_t = context_param_list->last_entry_t;
- } else {
- rrdset_rdlock(st);
- first_entry_t = rrdset_first_entry_t_nolock(st);
- last_entry_t = rrdset_last_entry_t_nolock(st);
- rrdset_unlock(st);
- }
-
- rrd_update_every = st->update_every;
- absolute_period_requested = rrdr_convert_before_after_to_absolute(&after_requested, &before_requested,
- rrd_update_every, first_entry_t,
- last_entry_t, options);
- if (options & RRDR_OPTION_ALLOW_PAST)
- if (first_entry_t > after_requested)
- first_entry_t = after_requested;
-
- if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
- rebuild_context_param_list(owa, context_param_list, after_requested);
- st = context_param_list->rd ? context_param_list->rd->rrdset : NULL;
- if (unlikely(!st))
- return NULL;
- }
-
-#ifdef ENABLE_DBENGINE
- if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
- struct rrdeng_region_info *region_info_array;
- unsigned regions, max_interval;
-
- /* This call takes the chart read-lock */
- regions = rrdeng_variable_step_boundaries(st, after_requested, before_requested,
- &region_info_array, &max_interval, context_param_list);
- if (1 == regions) {
- if (region_info_array) {
- if (rrd_update_every != region_info_array[0].update_every) {
- rrd_update_every = region_info_array[0].update_every;
- /* recalculate query alignment */
- absolute_period_requested =
- rrdr_convert_before_after_to_absolute(&after_requested, &before_requested, rrd_update_every,
- first_entry_t, last_entry_t, options);
- }
- freez(region_info_array);
- }
- return rrd2rrdr_fixedstep(owa, st, points_requested, after_requested, before_requested, group_method,
- resampling_time_requested, options, dimensions, rrd_update_every,
- first_entry_t, last_entry_t, absolute_period_requested, context_param_list, timeout);
- } else {
- if (rrd_update_every != (uint16_t)max_interval) {
- rrd_update_every = (uint16_t) max_interval;
- /* recalculate query alignment */
- absolute_period_requested = rrdr_convert_before_after_to_absolute(&after_requested, &before_requested,
- rrd_update_every, first_entry_t,
- last_entry_t, options);
- }
- return rrd2rrdr_variablestep(owa, st, points_requested, after_requested, before_requested, group_method,
- resampling_time_requested, options, dimensions, rrd_update_every,
- first_entry_t, last_entry_t, absolute_period_requested, region_info_array, context_param_list, timeout);
- }
- }
-#endif
- return rrd2rrdr_fixedstep(owa, st, points_requested, after_requested, before_requested, group_method,
- resampling_time_requested, options, dimensions,
- rrd_update_every, first_entry_t, last_entry_t, absolute_period_requested, context_param_list, timeout);
-}
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
index 6b8a51c58..df876d9ac 100644
--- a/web/api/queries/query.h
+++ b/web/api/queries/query.h
@@ -3,6 +3,10 @@
#ifndef NETDATA_API_DATA_QUERY_H
#define NETDATA_API_DATA_QUERY_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum rrdr_grouping {
RRDR_GROUPING_UNDEFINED = 0,
RRDR_GROUPING_AVERAGE,
@@ -10,15 +14,46 @@ typedef enum rrdr_grouping {
RRDR_GROUPING_MAX,
RRDR_GROUPING_SUM,
RRDR_GROUPING_INCREMENTAL_SUM,
+ RRDR_GROUPING_TRIMMED_MEAN1,
+ RRDR_GROUPING_TRIMMED_MEAN2,
+ RRDR_GROUPING_TRIMMED_MEAN3,
+ RRDR_GROUPING_TRIMMED_MEAN5,
+ RRDR_GROUPING_TRIMMED_MEAN10,
+ RRDR_GROUPING_TRIMMED_MEAN15,
+ RRDR_GROUPING_TRIMMED_MEAN20,
+ RRDR_GROUPING_TRIMMED_MEAN25,
RRDR_GROUPING_MEDIAN,
+ RRDR_GROUPING_TRIMMED_MEDIAN1,
+ RRDR_GROUPING_TRIMMED_MEDIAN2,
+ RRDR_GROUPING_TRIMMED_MEDIAN3,
+ RRDR_GROUPING_TRIMMED_MEDIAN5,
+ RRDR_GROUPING_TRIMMED_MEDIAN10,
+ RRDR_GROUPING_TRIMMED_MEDIAN15,
+ RRDR_GROUPING_TRIMMED_MEDIAN20,
+ RRDR_GROUPING_TRIMMED_MEDIAN25,
+ RRDR_GROUPING_PERCENTILE25,
+ RRDR_GROUPING_PERCENTILE50,
+ RRDR_GROUPING_PERCENTILE75,
+ RRDR_GROUPING_PERCENTILE80,
+ RRDR_GROUPING_PERCENTILE90,
+ RRDR_GROUPING_PERCENTILE95,
+ RRDR_GROUPING_PERCENTILE97,
+ RRDR_GROUPING_PERCENTILE98,
+ RRDR_GROUPING_PERCENTILE99,
RRDR_GROUPING_STDDEV,
RRDR_GROUPING_CV,
RRDR_GROUPING_SES,
RRDR_GROUPING_DES,
+ RRDR_GROUPING_COUNTIF,
} RRDR_GROUPING;
extern const char *group_method2string(RRDR_GROUPING group);
extern void web_client_api_v1_init_grouping(void);
extern RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);
+extern const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group);
+
+#ifdef __cplusplus
+}
+#endif
#endif //NETDATA_API_DATA_QUERY_H
diff --git a/web/api/queries/rrdr.c b/web/api/queries/rrdr.c
index 4d05778c1..ecf4ca2ac 100644
--- a/web/api/queries/rrdr.c
+++ b/web/api/queries/rrdr.c
@@ -30,7 +30,7 @@ static void rrdr_dump(RRDR *r)
// for each line in the array
for(i = 0; i < r->rows ;i++) {
- calculated_number *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_DIMENSION_FLAGS *co = &r->o[ i * r->d ];
// print the id and the timestamp of the line
@@ -44,7 +44,7 @@ static void rrdr_dump(RRDR *r)
if(co[c] & RRDR_EMPTY)
fprintf(stderr, "null ");
else
- fprintf(stderr, CALCULATED_NUMBER_FORMAT " %s%s%s%s "
+ fprintf(stderr, NETDATA_DOUBLE_FORMAT " %s%s%s%s "
, cn[c]
, (co[c] & RRDR_EMPTY)?"E":" "
, (co[c] & RRDR_RESET)?"R":" "
@@ -58,78 +58,65 @@ static void rrdr_dump(RRDR *r)
}
*/
+inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r) {
+ if(unlikely(!r)) return;
-
-
-inline static void rrdr_lock_rrdset(RRDR *r) {
- if(unlikely(!r)) {
- error("NULL value given!");
- return;
- }
-
- rrdset_rdlock(r->st);
- r->has_st_lock = 1;
-}
-
-inline static void rrdr_unlock_rrdset(RRDR *r) {
- if(unlikely(!r)) {
- error("NULL value given!");
- return;
- }
-
- if(likely(r->has_st_lock)) {
- r->has_st_lock = 0;
+ if(likely(r->st_locked_by_rrdr_create))
rrdset_unlock(r->st);
- }
-}
-
-inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r)
-{
- if(unlikely(!r)) {
- error("NULL value given!");
- return;
- }
- rrdr_unlock_rrdset(r);
onewayalloc_freez(owa, r->t);
onewayalloc_freez(owa, r->v);
onewayalloc_freez(owa, r->o);
onewayalloc_freez(owa, r->od);
+ onewayalloc_freez(owa, r->ar);
onewayalloc_freez(owa, r);
}
-RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list)
-{
- if (unlikely(!st)) {
- error("NULL value given!");
- return NULL;
- }
-
+RRDR *rrdr_create_for_x_dimensions(ONEWAYALLOC *owa, int dimensions, long points) {
RRDR *r = onewayalloc_callocz(owa, 1, sizeof(RRDR));
- r->st = st;
+ r->internal.owa = owa;
+
+ r->d = dimensions;
+ r->n = points;
+
+ r->t = onewayalloc_callocz(owa, points, sizeof(time_t));
+ r->v = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ r->o = onewayalloc_mallocz(owa, points * dimensions * sizeof(RRDR_VALUE_FLAGS));
+ r->ar = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ r->od = onewayalloc_mallocz(owa, dimensions * sizeof(RRDR_DIMENSION_FLAGS));
+ r->group = 1;
+ r->update_every = 1;
+
+ return r;
+}
+
+RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list) {
+ if (unlikely(!st)) return NULL;
+
+ bool st_locked_by_rrdr_create = false;
if (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
- rrdr_lock_rrdset(r);
- r->st_needs_lock = 1;
+ rrdset_rdlock(st);
+ st_locked_by_rrdr_create = true;
}
+ // count the number of dimensions
+ int dimensions = 0;
RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
RRDDIM *rd;
if (temp_rd) {
RRDDIM *t = temp_rd;
while (t) {
- r->d++;
+ dimensions++;
t = t->next;
}
} else
- rrddim_foreach_read(rd, st) r->d++;
-
- r->n = n;
+ rrddim_foreach_read(rd, st) dimensions++;
- r->t = onewayalloc_callocz(owa, (size_t)n, sizeof(time_t));
- r->v = onewayalloc_mallocz(owa, n * r->d * sizeof(calculated_number));
- r->o = onewayalloc_mallocz(owa, n * r->d * sizeof(RRDR_VALUE_FLAGS));
- r->od = onewayalloc_mallocz(owa, r->d * sizeof(RRDR_DIMENSION_FLAGS));
+ // create the rrdr
+ RRDR *r = rrdr_create_for_x_dimensions(owa, dimensions, n);
+ r->st = st;
+ r->st_locked_by_rrdr_create = st_locked_by_rrdr_create;
// set the hidden flag on hidden dimensions
int c;
@@ -140,8 +127,5 @@ RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_pa
r->od[c] = RRDR_DIMENSION_DEFAULT;
}
- r->group = 1;
- r->update_every = 1;
-
return r;
}
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
index 87ba6c86b..1c80e103f 100644
--- a/web/api/queries/rrdr.h
+++ b/web/api/queries/rrdr.h
@@ -4,27 +4,46 @@
#define NETDATA_QUERIES_RRDR_H
#include "libnetdata/libnetdata.h"
+#include "web/api/queries/query.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum tier_query_fetch {
+ TIER_QUERY_FETCH_SUM,
+ TIER_QUERY_FETCH_MIN,
+ TIER_QUERY_FETCH_MAX,
+ TIER_QUERY_FETCH_AVERAGE
+} TIER_QUERY_FETCH;
typedef enum rrdr_options {
- RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
- RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
- RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
- RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
- RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
- RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
- RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
- RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
- RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
- RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
- RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
- RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
- RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
- RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
- RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
- RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
- RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping response in a JSON, return custom variables in response
- RRDR_OPTION_ALLOW_PAST = 0x00020000, // The after parameter can extend in the past before the first entry
- RRDR_OPTION_ANOMALY_BIT = 0x00040000, // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
+ RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
+ RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
+ RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
+ RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
+ RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
+ RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
+ RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
+ RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
+ RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
+ RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
+ RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
+ RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
+ RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
+ RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
+ RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
+ RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping response in a JSON, return custom variables in response
+ RRDR_OPTION_NATURAL_POINTS = 0x00020000, // return the natural points of the database
+ RRDR_OPTION_VIRTUAL_POINTS = 0x00040000, // return virtual points
+ RRDR_OPTION_ANOMALY_BIT = 0x00080000, // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_RETURN_RAW = 0x00100000, // Return raw data for aggregating across multiple nodes
+ RRDR_OPTION_RETURN_JWAR = 0x00200000, // Return anomaly rates in jsonwrap
+ RRDR_OPTION_SELECTED_TIER = 0x00400000, // Use the selected tier for the query
+
+ // internal ones - not to be exposed to the API
+ RRDR_OPTION_INTERNAL_AR = 0x10000000, // internal use only, to let the formatters know we want to render the anomaly rate
} RRDR_OPTIONS;
typedef enum rrdr_value_flag {
@@ -62,40 +81,46 @@ typedef struct rrdresult {
RRDR_DIMENSION_FLAGS *od; // the options for the dimensions
time_t *t; // array of n timestamps
- calculated_number *v; // array n x d values
+ NETDATA_DOUBLE *v; // array n x d values
RRDR_VALUE_FLAGS *o; // array n x d options for each value returned
+ NETDATA_DOUBLE *ar; // array n x d of anomaly rates (0 - 100)
long group; // how many collected values were grouped for each row
int update_every; // what is the suggested update frequency in seconds
- calculated_number min;
- calculated_number max;
+ NETDATA_DOUBLE min;
+ NETDATA_DOUBLE max;
time_t before;
time_t after;
- int has_st_lock; // if st is read locked by us
- uint8_t st_needs_lock; // if ST should be locked
+ bool st_locked_by_rrdr_create; // if st is read locked by us
// internal rrd2rrdr() members below this point
struct {
+ int query_tier; // the selected tier
+ RRDR_OPTIONS query_options; // RRDR_OPTION_* (as run by the query)
+
long points_wanted;
long resampling_group;
- calculated_number resampling_divisor;
+ NETDATA_DOUBLE resampling_divisor;
- void (*grouping_create)(struct rrdresult *r);
+ void (*grouping_create)(struct rrdresult *r, const char *options);
void (*grouping_reset)(struct rrdresult *r);
void (*grouping_free)(struct rrdresult *r);
- void (*grouping_add)(struct rrdresult *r, calculated_number value);
- calculated_number (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
+ NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
void *grouping_data;
+ TIER_QUERY_FETCH tier_query_fetch;
#ifdef NETDATA_INTERNAL_CHECKS
const char *log;
#endif
size_t db_points_read;
size_t result_points_generated;
+ size_t tier_points_read[RRD_STORAGE_TIERS];
+ ONEWAYALLOC *owa;
} internal;
} RRDR;
@@ -104,16 +129,21 @@ typedef struct rrdresult {
#include "database/rrd.h"
extern void rrdr_free(ONEWAYALLOC *owa, RRDR *r);
extern RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list);
+extern RRDR *rrdr_create_for_x_dimensions(ONEWAYALLOC *owa, int dimensions, long points);
#include "../web_api_v1.h"
#include "web/api/queries/query.h"
extern RRDR *rrd2rrdr(
ONEWAYALLOC *owa,
- RRDSET *st, long points_requested, long long after_requested, long long before_requested,
+ RRDSET *st, long points_wanted, long long after_wanted, long long before_wanted,
RRDR_GROUPING group_method, long resampling_time_requested, RRDR_OPTIONS options, const char *dimensions,
- struct context_param *context_param_list, int timeout);
+ struct context_param *context_param_list, const char *group_options, int timeout, int tier);
+
+extern int rrdr_relative_window_to_absolute(long long *after, long long *before);
-#include "query.h"
+#ifdef __cplusplus
+}
+#endif
#endif //NETDATA_QUERIES_RRDR_H
diff --git a/web/api/queries/ses/ses.c b/web/api/queries/ses/ses.c
index ae4a0fa0d..5e94002c3 100644
--- a/web/api/queries/ses/ses.c
+++ b/web/api/queries/ses/ses.c
@@ -7,9 +7,9 @@
// single exponential smoothing
struct grouping_ses {
- calculated_number alpha;
- calculated_number alpha_other;
- calculated_number level;
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE level;
size_t count;
};
@@ -25,20 +25,20 @@ void grouping_init_ses(void) {
}
}
-static inline calculated_number window(RRDR *r, struct grouping_ses *g) {
+static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_ses *g) {
(void)g;
- calculated_number points;
+ NETDATA_DOUBLE points;
if(r->group == 1) {
// provide a running DES
- points = r->internal.points_wanted;
+ points = (NETDATA_DOUBLE)r->internal.points_wanted;
}
else {
// provide a SES with flush points
- points = r->group;
+ points = (NETDATA_DOUBLE)r->group;
}
- return (points > max_window_size) ? max_window_size : points;
+ return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
}
static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
@@ -48,8 +48,8 @@ static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
g->alpha_other = 1.0 - g->alpha;
}
-void grouping_create_ses(RRDR *r) {
- struct grouping_ses *g = (struct grouping_ses *)callocz(1, sizeof(struct grouping_ses));
+void grouping_create_ses(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_ses *g = (struct grouping_ses *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_ses));
set_alpha(r, g);
g->level = 0.0;
r->internal.grouping_data = g;
@@ -64,11 +64,11 @@ void grouping_reset_ses(RRDR *r) {
}
void grouping_free_ses(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_ses(RRDR *r, calculated_number value) {
+void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
if(unlikely(!g->count))
@@ -78,10 +78,10 @@ void grouping_add_ses(RRDR *r, calculated_number value) {
g->count++;
}
-calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
- if(unlikely(!g->count || !calculated_number_isnumber(g->level))) {
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
return 0.0;
}
diff --git a/web/api/queries/ses/ses.h b/web/api/queries/ses/ses.h
index c05f208f3..094b8de3f 100644
--- a/web/api/queries/ses/ses.h
+++ b/web/api/queries/ses/ses.h
@@ -8,10 +8,10 @@
extern void grouping_init_ses(void);
-extern void grouping_create_ses(RRDR *r);
+extern void grouping_create_ses(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_ses(RRDR *r);
extern void grouping_free_ses(RRDR *r);
-extern void grouping_add_ses(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_SES_H
diff --git a/web/api/queries/stddev/stddev.c b/web/api/queries/stddev/stddev.c
index ffe7a47c0..92a67b42d 100644
--- a/web/api/queries/stddev/stddev.c
+++ b/web/api/queries/stddev/stddev.c
@@ -11,11 +11,11 @@
struct grouping_stddev {
long count;
- calculated_number m_oldM, m_newM, m_oldS, m_newS;
+ NETDATA_DOUBLE m_oldM, m_newM, m_oldS, m_newS;
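+ // running mean (M) and sum of squared deviations (S), per Welford's online algorithm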
};
-void grouping_create_stddev(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_stddev));
+void grouping_create_stddev(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_stddev));
}
// resets when switches dimensions
@@ -26,11 +26,11 @@ void grouping_reset_stddev(RRDR *r) {
}
void grouping_free_stddev(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_stddev(RRDR *r, calculated_number value) {
+void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
g->count++;
@@ -50,26 +50,26 @@ void grouping_add_stddev(RRDR *r, calculated_number value) {
}
}
-static inline calculated_number mean(struct grouping_stddev *g) {
+static inline NETDATA_DOUBLE mean(struct grouping_stddev *g) {
return (g->count > 0) ? g->m_newM : 0.0;
}
-static inline calculated_number variance(struct grouping_stddev *g) {
- return ( (g->count > 1) ? g->m_newS/(g->count - 1) : 0.0 );
+static inline NETDATA_DOUBLE variance(struct grouping_stddev *g) {
+ return ( (g->count > 1) ? g->m_newS/(NETDATA_DOUBLE)(g->count - 1) : 0.0 );
}
-static inline calculated_number stddev(struct grouping_stddev *g) {
- return sqrtl(variance(g));
+static inline NETDATA_DOUBLE stddev(struct grouping_stddev *g) {
+ return sqrtndd(variance(g));
}
-calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(likely(g->count > 1)) {
value = stddev(g);
- if(!calculated_number_isnumber(value)) {
+ if(!netdata_double_isnumber(value)) {
value = 0.0;
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
@@ -88,16 +88,16 @@ calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_op
}
// https://en.wikipedia.org/wiki/Coefficient_of_variation
-calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(likely(g->count > 1)) {
- calculated_number m = mean(g);
+ NETDATA_DOUBLE m = mean(g);
value = 100.0 * stddev(g) / ((m < 0)? -m : m);
- if(unlikely(!calculated_number_isnumber(value))) {
+ if(unlikely(!netdata_double_isnumber(value))) {
value = 0.0;
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
@@ -121,10 +121,10 @@ calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FL
/*
* Mean = average
*
-calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
@@ -148,10 +148,10 @@ calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_opti
/*
* It is not advised to use this version of variance directly
*
-calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/stddev/stddev.h b/web/api/queries/stddev/stddev.h
index ab58fbe50..c5c91f88d 100644
--- a/web/api/queries/stddev/stddev.h
+++ b/web/api/queries/stddev/stddev.h
@@ -6,13 +6,13 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_stddev(RRDR *r);
+extern void grouping_create_stddev(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_stddev(RRDR *r);
extern void grouping_free_stddev(RRDR *r);
-extern void grouping_add_stddev(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-extern calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// extern calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// extern calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// extern NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// extern NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_STDDEV_H
diff --git a/web/api/queries/sum/sum.c b/web/api/queries/sum/sum.c
index 6bb012bb0..eec6e2ad0 100644
--- a/web/api/queries/sum/sum.c
+++ b/web/api/queries/sum/sum.c
@@ -6,12 +6,12 @@
// sum
struct grouping_sum {
- calculated_number sum;
+ NETDATA_DOUBLE sum;
size_t count;
};
-void grouping_create_sum(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_sum));
+void grouping_create_sum(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_sum));
}
// resets when switches dimensions
@@ -23,20 +23,20 @@ void grouping_reset_sum(RRDR *r) {
}
void grouping_free_sum(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_sum(RRDR *r, calculated_number value) {
+void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
g->sum += value;
g->count++;
}
-calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/sum/sum.h b/web/api/queries/sum/sum.h
index 05cb6185e..4e7e396e9 100644
--- a/web/api/queries/sum/sum.h
+++ b/web/api/queries/sum/sum.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_sum(RRDR *r);
+extern void grouping_create_sum(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_sum(RRDR *r);
extern void grouping_free_sum(RRDR *r);
-extern void grouping_add_sum(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_SUM_H
diff --git a/web/api/queries/trimmed_mean/Makefile.am b/web/api/queries/trimmed_mean/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/web/api/queries/trimmed_mean/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/trimmed_mean/README.md b/web/api/queries/trimmed_mean/README.md
new file mode 100644
index 000000000..71cdb85db
--- /dev/null
+++ b/web/api/queries/trimmed_mean/README.md
@@ -0,0 +1,56 @@
+<!--
+title: "Trimmed Mean"
+description: "Use trimmed-mean in API queries and health entities to find the average value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/trimmed_mean/README.md
+-->
+
+# Trimmed Mean
+
+The trimmed mean is the average value of a series excluding the smallest and largest points.
+
+Netdata applies linear interpolation on the last point if the percentage requested to be
+excluded does not yield a whole number of points.
+
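+For illustration only, here is a minimal C sketch of a symmetric trimmed mean over an
+already-sorted window. It omits the boundary interpolation described above, and the
+function name and signature are hypothetical, not part of the Netdata API:
+
+```
+static double trimmed_mean(const double *sorted, size_t n, double percent) {
+    if(n == 0) return 0.0;
+
+    // number of points to drop from each end of the sorted window
+    size_t cut = (size_t)((double)n * percent / 100.0);
+    if(2 * cut >= n) return sorted[n / 2]; // degenerate case: fall back to the median
+
+    double sum = 0.0;
+    for(size_t i = cut; i < n - cut; i++)
+        sum += sorted[i];
+
+    return sum / (double)(n - 2 * cut);
+}
+```
+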
+The following trimmed-mean aliases are defined:
+
+- `trimmed-mean1`
+- `trimmed-mean2`
+- `trimmed-mean3`
+- `trimmed-mean5`
+- `trimmed-mean10`
+- `trimmed-mean15`
+- `trimmed-mean20`
+- `trimmed-mean25`
+
+The default `trimmed-mean` is an alias for `trimmed-mean5`.
+Any percentage may be requested using the `group_options` query parameter.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: trimmed-mean5 -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`trimmed-mean` does not change the units. For example, if the chart's units are `requests/sec`, the result
+will be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=trimmed-mean` in the URL, and the additional parameter `group_options`
+may be used to request any percentage (e.g. `&group=trimmed-mean&group_options=29`).
+
+## Examples
+
+Examining the last 1 minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=trimmed-mean5&after=-60&label=trimmed-mean5&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Truncated_mean>.
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.c b/web/api/queries/trimmed_mean/trimmed_mean.c
new file mode 100644
index 000000000..2277208a7
--- /dev/null
+++ b/web/api/queries/trimmed_mean/trimmed_mean.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "trimmed_mean.h"
+
+// ----------------------------------------------------------------------------
+// trimmed mean
+
+struct grouping_trimmed_mean {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static void grouping_create_trimmed_mean_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->group;
+ if(entries < 10) entries = 10;
+
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_trimmed_mean));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
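+    // convert the per-tail trim percentage into the fraction of points kept, e.g. 5% keeps 0.90 of the window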
+ g->percent = 1.0 - ((g->percent / 100.0) * 2.0);
+ r->internal.grouping_data = g;
+}
+
+void grouping_create_trimmed_mean1(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 1.0);
+}
+void grouping_create_trimmed_mean2(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 2.0);
+}
+void grouping_create_trimmed_mean3(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 3.0);
+}
+void grouping_create_trimmed_mean5(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 5.0);
+}
+void grouping_create_trimmed_mean10(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 10.0);
+}
+void grouping_create_trimmed_mean15(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 15.0);
+}
+void grouping_create_trimmed_mean20(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 20.0);
+}
+void grouping_create_trimmed_mean25(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 25.0);
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_trimmed_mean(RRDR *r) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_trimmed_mean(RRDR *r) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1 - (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "trimmed_mean");
+
+ g->next_pos = 0;
+
+ return value;
+}
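+
+// For intuition, the flush above computes an interpolated trimmed mean.
+// A minimal sketch of the plain (non-interpolated) variant follows;
+// simple_trimmed_mean() is a hypothetical helper for illustration only and
+// assumes `s` is already sorted in ascending order:
+//
+//     static NETDATA_DOUBLE simple_trimmed_mean(const NETDATA_DOUBLE *s, size_t n, NETDATA_DOUBLE kept) {
+//         size_t use = (size_t)((NETDATA_DOUBLE)n * kept); // kept is the fraction to keep, 0.0 .. 1.0
+//         if(!use) use = 1;
+//         size_t start = (n - use) / 2;                    // drop the same count from both tails
+//         NETDATA_DOUBLE sum = 0.0;
+//         for(size_t i = start; i < start + use; i++)
+//             sum += s[i];
+//         return sum / (NETDATA_DOUBLE)use;
+//     }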
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.h b/web/api/queries/trimmed_mean/trimmed_mean.h
new file mode 100644
index 000000000..1a4f63e9c
--- /dev/null
+++ b/web/api/queries/trimmed_mean/trimmed_mean.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_TRIMMED_MEAN_H
+#define NETDATA_API_QUERIES_TRIMMED_MEAN_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_create_trimmed_mean1(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean2(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean3(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean5(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean10(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean15(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean20(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean25(RRDR *r, const char *options);
+extern void grouping_reset_trimmed_mean(RRDR *r);
+extern void grouping_free_trimmed_mean(RRDR *r);
+extern void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_TRIMMED_MEAN_H
diff --git a/web/api/queries/weights.c b/web/api/queries/weights.c
new file mode 100644
index 000000000..97a00f91c
--- /dev/null
+++ b/web/api/queries/weights.c
@@ -0,0 +1,1220 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "daemon/common.h"
+#include "database/KolmogorovSmirnovDist.h"
+
+#define MAX_POINTS 10000
+int enable_metric_correlations = CONFIG_BOOLEAN_YES;
+int metric_correlations_version = 1;
+WEIGHTS_METHOD default_metric_correlations_method = WEIGHTS_METHOD_MC_KS2;
+
+typedef struct weights_stats {
+ NETDATA_DOUBLE max_base_high_ratio;
+ size_t db_points;
+ size_t result_points;
+ size_t db_queries;
+ size_t db_points_per_tier[RRD_STORAGE_TIERS];
+ size_t binary_searches;
+} WEIGHTS_STATS;
+
+// ----------------------------------------------------------------------------
+// parse and render metric correlations methods
+
+static struct {
+ const char *name;
+ WEIGHTS_METHOD value;
+} weights_methods[] = {
+ { "ks2" , WEIGHTS_METHOD_MC_KS2}
+ , { "volume" , WEIGHTS_METHOD_MC_VOLUME}
+ , { "anomaly-rate" , WEIGHTS_METHOD_ANOMALY_RATE}
+ , { NULL , 0 }
+};
+
+WEIGHTS_METHOD weights_string_to_method(const char *method) {
+ for(int i = 0; weights_methods[i].name ;i++)
+ if(strcmp(method, weights_methods[i].name) == 0)
+ return weights_methods[i].value;
+
+ return default_metric_correlations_method;
+}
+
+const char *weights_method_to_string(WEIGHTS_METHOD method) {
+ for(int i = 0; weights_methods[i].name ;i++)
+ if(weights_methods[i].value == method)
+ return weights_methods[i].name;
+
+ return "unknown";
+}
+
+// ----------------------------------------------------------------------------
+// The results per dimension are aggregated into a dictionary
+
+typedef enum {
+ RESULT_IS_BASE_HIGH_RATIO = (1 << 0),
+ RESULT_IS_PERCENTAGE_OF_TIME = (1 << 1),
+} RESULT_FLAGS;
+
+struct register_result {
+ RESULT_FLAGS flags;
+ RRDSET *st;
+ const char *chart_id;
+ const char *context;
+ const char *dim_name;
+ NETDATA_DOUBLE value;
+
+ struct register_result *next; // used to link contexts together
+};
+
+static void register_result_insert_callback(const char *name, void *value, void *data) {
+ (void)name;
+ (void)data;
+
+ struct register_result *t = (struct register_result *)value;
+
+ if(t->chart_id) t->chart_id = strdupz(t->chart_id);
+ if(t->context) t->context = strdupz(t->context);
+ if(t->dim_name) t->dim_name = strdupz(t->dim_name);
+}
+
+static void register_result_delete_callback(const char *name, void *value, void *data) {
+ (void)name;
+ (void)data;
+ struct register_result *t = (struct register_result *)value;
+
+ freez((void *)t->chart_id);
+ freez((void *)t->context);
+ freez((void *)t->dim_name);
+}
+
+static DICTIONARY *register_result_init() {
+ DICTIONARY *results = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ dictionary_register_insert_callback(results, register_result_insert_callback, results);
+ dictionary_register_delete_callback(results, register_result_delete_callback, results);
+ return results;
+}
+
+static void register_result_destroy(DICTIONARY *results) {
+ dictionary_destroy(results);
+}
+
+static void register_result(DICTIONARY *results,
+ RRDSET *st,
+ RRDDIM *d,
+ NETDATA_DOUBLE value,
+ RESULT_FLAGS flags,
+ WEIGHTS_STATS *stats,
+ bool register_zero) {
+
+ if(!netdata_double_isnumber(value)) return;
+
+ // make it positive
+ NETDATA_DOUBLE v = fabsndd(value);
+
+    // no need to store zero-scored values
+ if(unlikely(fpclassify(v) == FP_ZERO && !register_zero))
+ return;
+
+ // keep track of the max of the baseline / highlight ratio
+ if(flags & RESULT_IS_BASE_HIGH_RATIO && v > stats->max_base_high_ratio)
+ stats->max_base_high_ratio = v;
+
+ struct register_result t = {
+ .flags = flags,
+ .st = st,
+ .chart_id = st->id,
+ .context = st->context,
+ .dim_name = d->name,
+ .value = v
+ };
+
+ char buf[5000 + 1];
+ snprintfz(buf, 5000, "%s:%s", st->id, d->name);
+ dictionary_set(results, buf, &t, sizeof(struct register_result));
+}
+
+// ----------------------------------------------------------------------------
+// Generation of JSON output for the results
+
+static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *wb,
+ long long after, long long before,
+ long long baseline_after, long long baseline_before,
+ long points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions __maybe_unused, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ buffer_sprintf(wb, "{\n"
+ "\t\"after\": %lld,\n"
+ "\t\"before\": %lld,\n"
+ "\t\"duration\": %lld,\n"
+ "\t\"points\": %ld,\n",
+ after,
+ before,
+ before - after,
+ points
+ );
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME)
+ buffer_sprintf(wb, ""
+ "\t\"baseline_after\": %lld,\n"
+ "\t\"baseline_before\": %lld,\n"
+ "\t\"baseline_duration\": %lld,\n"
+ "\t\"baseline_points\": %ld,\n",
+ baseline_after,
+ baseline_before,
+ baseline_before - baseline_after,
+ points << shifts
+ );
+
+ buffer_sprintf(wb, ""
+ "\t\"statistics\": {\n"
+ "\t\t\"query_time_ms\": %f,\n"
+ "\t\t\"db_queries\": %zu,\n"
+ "\t\t\"query_result_points\": %zu,\n"
+ "\t\t\"binary_searches\": %zu,\n"
+ "\t\t\"db_points_read\": %zu,\n"
+ "\t\t\"db_points_per_tier\": [ ",
+ (double)duration / (double)USEC_PER_MS,
+ stats->db_queries,
+ stats->result_points,
+ stats->binary_searches,
+ stats->db_points
+ );
+
+ for(int tier = 0; tier < storage_tiers ;tier++)
+ buffer_sprintf(wb, "%s%zu", tier?", ":"", stats->db_points_per_tier[tier]);
+
+ buffer_sprintf(wb, " ]\n"
+ "\t},\n"
+ "\t\"group\": \"%s\",\n"
+ "\t\"method\": \"%s\",\n"
+ "\t\"options\": \"",
+ web_client_api_request_v1_data_group_to_string(group),
+ weights_method_to_string(method)
+ );
+
+ web_client_api_request_v1_data_options_to_string(wb, options);
+}
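+
+// With assumed example values, the header above opens the response object
+// like this (illustrative only; the "options" string is deliberately left
+// open, and the caller closes it before adding the results):
+//
+//     {
+//         "after": 1650000000,
+//         "before": 1650000060,
+//         "duration": 60,
+//         "points": 500,
+//         ...
+//         "options": "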
+
+static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
+ long long after, long long before,
+ long long baseline_after, long long baseline_before,
+ long points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions, duration, stats);
+
+ buffer_strcat(wb, "\",\n\t\"correlated_charts\": {\n");
+
+ size_t charts = 0, chart_dims = 0, total_dimensions = 0;
+ struct register_result *t;
+ RRDSET *last_st = NULL; // never access this - we use it only for comparison
+ dfe_start_read(results, t) {
+ if(!last_st || t->st != last_st) {
+ last_st = t->st;
+
+ if(charts) buffer_strcat(wb, "\n\t\t\t}\n\t\t},\n");
+ buffer_strcat(wb, "\t\t\"");
+ buffer_strcat(wb, t->chart_id);
+ buffer_strcat(wb, "\": {\n");
+ buffer_strcat(wb, "\t\t\t\"context\": \"");
+ buffer_strcat(wb, t->context);
+ buffer_strcat(wb, "\",\n\t\t\t\"dimensions\": {\n");
+ charts++;
+ chart_dims = 0;
+ }
+ if (chart_dims) buffer_sprintf(wb, ",\n");
+ buffer_sprintf(wb, "\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, t->dim_name, t->value);
+ chart_dims++;
+ total_dimensions++;
+ }
+ dfe_done(t);
+
+ // close dimensions and chart
+ if (total_dimensions)
+ buffer_strcat(wb, "\n\t\t\t}\n\t\t}\n");
+
+ // close correlated_charts
+ buffer_sprintf(wb, "\t},\n"
+ "\t\"correlated_dimensions\": %zu,\n"
+ "\t\"total_dimensions_count\": %zu\n"
+ "}\n",
+ total_dimensions,
+ examined_dimensions
+ );
+
+ return total_dimensions;
+}
+
+static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *wb,
+ long long after, long long before,
+ long long baseline_after, long long baseline_before,
+ long points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions, duration, stats);
+
+ DICTIONARY *context_results = dictionary_create(
+ DICTIONARY_FLAG_SINGLE_THREADED
+ |DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE
+ |DICTIONARY_FLAG_NAME_LINK_DONT_CLONE
+ |DICTIONARY_FLAG_DONT_OVERWRITE_VALUE
+ );
+
+ struct register_result *t;
+ dfe_start_read(results, t) {
+ struct register_result *tc = dictionary_set(context_results, t->context, t, sizeof(*t));
+ if(tc == t)
+ t->next = NULL;
+ else {
+ t->next = tc->next;
+ tc->next = t;
+ }
+ }
+ dfe_done(t);
+
+ buffer_strcat(wb, "\",\n\t\"contexts\": {\n");
+
+ size_t contexts = 0, total_dimensions = 0, charts = 0, context_dims = 0, chart_dims = 0;
+ NETDATA_DOUBLE contexts_total_weight = 0.0, charts_total_weight = 0.0;
+ RRDSET *last_st = NULL; // never access this - we use it only for comparison
+ dfe_start_read(context_results, t) {
+
+ if(contexts)
+ buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t},\n", charts_total_weight / chart_dims, contexts_total_weight / context_dims);
+
+ contexts++;
+ context_dims = 0;
+ contexts_total_weight = 0.0;
+
+ buffer_strcat(wb, "\t\t\"");
+ buffer_strcat(wb, t->context);
+ buffer_strcat(wb, "\": {\n\t\t\t\"charts\":{\n");
+
+ charts = 0;
+ chart_dims = 0;
+ struct register_result *tt;
+ for(tt = t; tt ; tt = tt->next) {
+ if(!last_st || tt->st != last_st) {
+ last_st = tt->st;
+
+ if(charts)
+ buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t},\n", charts_total_weight / chart_dims);
+
+ buffer_strcat(wb, "\t\t\t\t\"");
+ buffer_strcat(wb, tt->chart_id);
+ buffer_strcat(wb, "\": {\n");
+ buffer_strcat(wb, "\t\t\t\t\t\"dimensions\": {\n");
+ charts++;
+ chart_dims = 0;
+ charts_total_weight = 0.0;
+ }
+
+ if (chart_dims) buffer_sprintf(wb, ",\n");
+ buffer_sprintf(wb, "\t\t\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, tt->dim_name, tt->value);
+ charts_total_weight += tt->value;
+ contexts_total_weight += tt->value;
+ chart_dims++;
+ context_dims++;
+ total_dimensions++;
+ }
+ }
+ dfe_done(t);
+
+ dictionary_destroy(context_results);
+
+ // close dimensions and chart
+ if (total_dimensions)
+ buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t}\n", charts_total_weight / chart_dims, contexts_total_weight / context_dims);
+
+    // close contexts
+ buffer_sprintf(wb, "\t},\n"
+ "\t\"weighted_dimensions\": %zu,\n"
+ "\t\"total_dimensions_count\": %zu\n"
+ "}\n",
+ total_dimensions,
+ examined_dimensions
+ );
+
+ return total_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+// KS2 algorithm functions
+
+typedef long int DIFFS_NUMBERS;
+#define DOUBLE_TO_INT_MULTIPLIER 100000
+
+static inline int binary_search_bigger_than(const DIFFS_NUMBERS arr[], int left, int size, DIFFS_NUMBERS K) {
+    // binary search to find the smallest index of the first value
+    // in the array that is greater than K
+
+ int right = size;
+ while(left < right) {
+ int middle = (int)(((unsigned int)(left + right)) >> 1);
+
+ if(arr[middle] > K)
+ right = middle;
+
+ else
+ left = middle + 1;
+ }
+
+ return left;
+}
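+
+// Usage sketch (values assumed): with arr = { 10, 20, 20, 30 } and K = 20,
+// binary_search_bigger_than(arr, 0, 4, K) returns 3, the index of the first
+// element strictly greater than K; when no element is greater, it returns size.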
+
+int compare_diffs(const void *left, const void *right) {
+ DIFFS_NUMBERS lt = *(DIFFS_NUMBERS *)left;
+ DIFFS_NUMBERS rt = *(DIFFS_NUMBERS *)right;
+
+ // https://stackoverflow.com/a/3886497/1114110
+ return (lt > rt) - (lt < rt);
+}
+
+static size_t calculate_pairs_diff(DIFFS_NUMBERS *diffs, NETDATA_DOUBLE *arr, size_t size) {
+ NETDATA_DOUBLE *last = &arr[size - 1];
+ size_t added = 0;
+
+ while(last > arr) {
+ NETDATA_DOUBLE second = *last--;
+ NETDATA_DOUBLE first = *last;
+ *diffs++ = (DIFFS_NUMBERS)((first - second) * (NETDATA_DOUBLE)DOUBLE_TO_INT_MULTIPLIER);
+ added++;
+ }
+
+ return added;
+}
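+
+// Worked example (numbers assumed): arr = { 5.0, 7.5, 6.0 } is walked
+// backwards, producing diffs of (7.5 - 6.0) and (5.0 - 7.5), stored as the
+// fixed-point integers { 150000, -250000 } with DOUBLE_TO_INT_MULTIPLIER =
+// 100000. The fixed-point form lets the KS2 code sort and compare the diffs
+// with cheap integer operations instead of floating point ones.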
+
+static double ks_2samp(DIFFS_NUMBERS baseline_diffs[], int base_size, DIFFS_NUMBERS highlight_diffs[], int high_size, uint32_t base_shifts) {
+
+ qsort(baseline_diffs, base_size, sizeof(DIFFS_NUMBERS), compare_diffs);
+ qsort(highlight_diffs, high_size, sizeof(DIFFS_NUMBERS), compare_diffs);
+
+ // Now we should be calculating this:
+ //
+    // For each number in the diffs arrays, we should find the index of the
+    // first value bigger than it in both arrays, and calculate the % of this
+    // index vs the total array size. Once we have the 2 percentages, we should
+    // find the min and max across the deltas of all of them.
+ //
+ // It should look like this:
+ //
+ // base_pcent = binary_search_bigger_than(...) / base_size;
+ // high_pcent = binary_search_bigger_than(...) / high_size;
+ // delta = base_pcent - high_pcent;
+ // if(delta < min) min = delta;
+ // if(delta > max) max = delta;
+ //
+ // This would require a lot of multiplications and divisions.
+ //
+    // To speed it up, we do the binary search to find the index of each number,
+    // but instead of dividing, we shift the high index left by base_shifts (the
+    // power-of-two factor by which the baseline array is bigger than the
+    // highlight one), so the two indexes become directly comparable.
+ // We also keep track of the original indexes with min and max, to properly
+ // calculate their percentages once the loops finish.
+
+
+ // initialize min and max using the first number of baseline_diffs
+ DIFFS_NUMBERS K = baseline_diffs[0];
+ int base_idx = binary_search_bigger_than(baseline_diffs, 1, base_size, K);
+ int high_idx = binary_search_bigger_than(highlight_diffs, 0, high_size, K);
+ int delta = base_idx - (high_idx << base_shifts);
+ int min = delta, max = delta;
+ int base_min_idx = base_idx;
+ int base_max_idx = base_idx;
+ int high_min_idx = high_idx;
+ int high_max_idx = high_idx;
+
+ // do the baseline_diffs starting from 1 (we did position 0 above)
+ for(int i = 1; i < base_size; i++) {
+ K = baseline_diffs[i];
+        base_idx = binary_search_bigger_than(baseline_diffs, i + 1, base_size, K); // starting from i + 1, since baseline_diffs is sorted
+ high_idx = binary_search_bigger_than(highlight_diffs, 0, high_size, K);
+
+ delta = base_idx - (high_idx << base_shifts);
+ if(delta < min) {
+ min = delta;
+ base_min_idx = base_idx;
+ high_min_idx = high_idx;
+ }
+ else if(delta > max) {
+ max = delta;
+ base_max_idx = base_idx;
+ high_max_idx = high_idx;
+ }
+ }
+
+ // do the highlight_diffs starting from 0
+ for(int i = 0; i < high_size; i++) {
+ K = highlight_diffs[i];
+ base_idx = binary_search_bigger_than(baseline_diffs, 0, base_size, K);
+        high_idx = binary_search_bigger_than(highlight_diffs, i + 1, high_size, K); // starting from i + 1, since highlight_diffs is sorted
+
+ delta = base_idx - (high_idx << base_shifts);
+ if(delta < min) {
+ min = delta;
+ base_min_idx = base_idx;
+ high_min_idx = high_idx;
+ }
+ else if(delta > max) {
+ max = delta;
+ base_max_idx = base_idx;
+ high_max_idx = high_idx;
+ }
+ }
+
+ // now we have the min, max and their indexes
+ // properly calculate min and max as dmin and dmax
+ double dbase_size = (double)base_size;
+ double dhigh_size = (double)high_size;
+ double dmin = ((double)base_min_idx / dbase_size) - ((double)high_min_idx / dhigh_size);
+ double dmax = ((double)base_max_idx / dbase_size) - ((double)high_max_idx / dhigh_size);
+
+ dmin = -dmin;
+ if(islessequal(dmin, 0.0)) dmin = 0.0;
+ else if(isgreaterequal(dmin, 1.0)) dmin = 1.0;
+
+ double d;
+ if(isgreaterequal(dmin, dmax)) d = dmin;
+ else d = dmax;
+
+ double en = round(dbase_size * dhigh_size / (dbase_size + dhigh_size));
+
+ // under these conditions, KSfbar() crashes
+ if(unlikely(isnan(en) || isinf(en) || en == 0.0 || isnan(d) || isinf(d)))
+ return NAN;
+
+ return KSfbar((int)en, d);
+}
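+
+// The statistic computed above follows the classic two-sample KS form
+// (notation assumed for illustration): with empirical CDFs F1 and F2 of the
+// two diff arrays,
+//
+//     D  = max over x of | F1(x) - F2(x) |
+//     en = round( n1 * n2 / (n1 + n2) )
+//     p  = KSfbar(en, D)
+//
+// where the min/max bookkeeping above tracks the extreme CDF differences
+// without paying for a division on every comparison.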
+
+static double kstwo(
+ NETDATA_DOUBLE baseline[], int baseline_points,
+ NETDATA_DOUBLE highlight[], int highlight_points, uint32_t base_shifts) {
+    // -1 in size, since calculate_pairs_diff() returns one point less
+ DIFFS_NUMBERS baseline_diffs[baseline_points - 1];
+ DIFFS_NUMBERS highlight_diffs[highlight_points - 1];
+
+ int base_size = (int)calculate_pairs_diff(baseline_diffs, baseline, baseline_points);
+ int high_size = (int)calculate_pairs_diff(highlight_diffs, highlight, highlight_points);
+
+ if(unlikely(!base_size || !high_size))
+ return NAN;
+
+ if(unlikely(base_size != baseline_points - 1 || high_size != highlight_points - 1)) {
+ error("Metric correlations: internal error - calculate_pairs_diff() returns the wrong number of entries");
+ return NAN;
+ }
+
+ return ks_2samp(baseline_diffs, base_size, highlight_diffs, high_size, base_shifts);
+}
+
+
+static int rrdset_metric_correlations_ks2(RRDSET *st, DICTIONARY *results,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ long long points, RRDR_OPTIONS options,
+ RRDR_GROUPING group, const char *group_options, int tier,
+ uint32_t shifts, int timeout,
+ WEIGHTS_STATS *stats, bool register_zero) {
+ options |= RRDR_OPTION_NATURAL_POINTS;
+
+ long group_time = 0;
+ struct context_param *context_param_list = NULL;
+
+ int examined_dimensions = 0;
+
+ RRDR *high_rrdr = NULL;
+ RRDR *base_rrdr = NULL;
+
+    // query the highlight window first, to find the number of points available
+ stats->db_queries++;
+ usec_t started_usec = now_realtime_usec();
+ ONEWAYALLOC *owa = onewayalloc_create(0);
+ high_rrdr = rrd2rrdr(owa, st, points,
+ after, before, group,
+ group_time, options, NULL, context_param_list, group_options,
+ timeout, tier);
+ if(!high_rrdr) {
+ info("Metric correlations: rrd2rrdr() failed for the highlighted window on chart '%s'.", st->name);
+ goto cleanup;
+ }
+
+ for(int i = 0; i < storage_tiers ;i++)
+ stats->db_points_per_tier[i] += high_rrdr->internal.tier_points_read[i];
+
+ stats->db_points += high_rrdr->internal.db_points_read;
+ stats->result_points += high_rrdr->internal.result_points_generated;
+ if(!high_rrdr->d) {
+ info("Metric correlations: rrd2rrdr() did not return any dimensions on chart '%s'.", st->name);
+ goto cleanup;
+ }
+ if(high_rrdr->result_options & RRDR_RESULT_OPTION_CANCEL) {
+ info("Metric correlations: rrd2rrdr() on highlighted window timed out '%s'.", st->name);
+ goto cleanup;
+ }
+ int high_points = rrdr_rows(high_rrdr);
+
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ goto cleanup;
+
+    // get the baseline, requesting (high_points << shifts) points to cover the bigger window
+ stats->db_queries++;
+ base_rrdr = rrd2rrdr(owa, st,high_points << shifts,
+ baseline_after, baseline_before, group,
+ group_time, options, NULL, context_param_list, group_options,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)), tier);
+ if(!base_rrdr) {
+ info("Metric correlations: rrd2rrdr() failed for the baseline window on chart '%s'.", st->name);
+ goto cleanup;
+ }
+
+ for(int i = 0; i < storage_tiers ;i++)
+ stats->db_points_per_tier[i] += base_rrdr->internal.tier_points_read[i];
+
+ stats->db_points += base_rrdr->internal.db_points_read;
+ stats->result_points += base_rrdr->internal.result_points_generated;
+ if(!base_rrdr->d) {
+ info("Metric correlations: rrd2rrdr() did not return any dimensions on chart '%s'.", st->name);
+ goto cleanup;
+ }
+ if (base_rrdr->d != high_rrdr->d) {
+ info("Cannot generate metric correlations for chart '%s' when the baseline and the highlight have different number of dimensions.", st->name);
+ goto cleanup;
+ }
+ if(base_rrdr->result_options & RRDR_RESULT_OPTION_CANCEL) {
+ info("Metric correlations: rrd2rrdr() on baseline window timed out '%s'.", st->name);
+ goto cleanup;
+ }
+ int base_points = rrdr_rows(base_rrdr);
+
+ now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ goto cleanup;
+
+ // we need at least 2 points to do the job
+ if(base_points < 2 || high_points < 2)
+ goto cleanup;
+
+ // for each dimension
+ RRDDIM *d;
+ int i;
+ for(i = 0, d = base_rrdr->st->dimensions ; d && i < base_rrdr->d; i++, d = d->next) {
+
+ // skip the not evaluated ones
+ if(unlikely(base_rrdr->od[i] & RRDR_DIMENSION_HIDDEN) || (high_rrdr->od[i] & RRDR_DIMENSION_HIDDEN))
+ continue;
+
+ examined_dimensions++;
+
+ // skip the dimensions that are just zero for both the baseline and the highlight
+ if(unlikely(!(base_rrdr->od[i] & RRDR_DIMENSION_NONZERO) && !(high_rrdr->od[i] & RRDR_DIMENSION_NONZERO)))
+ continue;
+
+ // copy the baseline points of the dimension to a contiguous array
+ // there is no need to check for empty values, since empty are already zero
+ NETDATA_DOUBLE baseline[base_points];
+ for(int c = 0; c < base_points; c++)
+ baseline[c] = base_rrdr->v[ c * base_rrdr->d + i ];
+
+ // copy the highlight points of the dimension to a contiguous array
+ // there is no need to check for empty values, since empty values are already zero
+ // https://github.com/netdata/netdata/blob/6e3144683a73a2024d51425b20ecfd569034c858/web/api/queries/average/average.c#L41-L43
+ NETDATA_DOUBLE highlight[high_points];
+ for(int c = 0; c < high_points; c++)
+ highlight[c] = high_rrdr->v[ c * high_rrdr->d + i ];
+
+ stats->binary_searches += 2 * (base_points - 1) + 2 * (high_points - 1);
+
+ double prob = kstwo(baseline, base_points, highlight, high_points, shifts);
+ if(!isnan(prob) && !isinf(prob)) {
+
+ // these conditions should never happen, but still let's check
+ if(unlikely(prob < 0.0)) {
+ error("Metric correlations: kstwo() returned a negative number: %f", prob);
+ prob = -prob;
+ }
+ if(unlikely(prob > 1.0)) {
+ error("Metric correlations: kstwo() returned a number above 1.0: %f", prob);
+ prob = 1.0;
+ }
+
+            // to spread the results evenly, 0.0 needs to be the least correlated and 1.0 the most correlated
+ // so we flip the result of kstwo()
+ register_result(results, base_rrdr->st, d, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, stats, register_zero);
+ }
+ }
+
+cleanup:
+ rrdr_free(owa, high_rrdr);
+ rrdr_free(owa, base_rrdr);
+ onewayalloc_destroy(owa);
+ return examined_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+// VOLUME algorithm functions
+
+static int rrdset_metric_correlations_volume(RRDSET *st, DICTIONARY *results,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ RRDR_OPTIONS options, RRDR_GROUPING group, const char *group_options,
+ int tier, int timeout,
+ WEIGHTS_STATS *stats, bool register_zero) {
+
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ABSOLUTE | RRDR_OPTION_NATURAL_POINTS;
+ long group_time = 0;
+
+ int examined_dimensions = 0;
+ int ret, value_is_null;
+ usec_t started_usec = now_realtime_usec();
+
+ RRDDIM *d;
+ for(d = st->dimensions; d ; d = d->next) {
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ return examined_dimensions;
+
+ // we count how many metrics we evaluated
+ examined_dimensions++;
+
+        // there is no point in passing a timeout to these queries,
+        // since the query engine checks for a timeout between
+        // dimensions, and we query a single dimension at a time.
+
+ stats->db_queries++;
+ NETDATA_DOUBLE baseline_average = NAN;
+ NETDATA_DOUBLE base_anomaly_rate = 0;
+ value_is_null = 1;
+ ret = rrdset2value_api_v1(st, NULL, &baseline_average, d->id, 1,
+ baseline_after, baseline_before,
+ group, group_options, group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, &base_anomaly_rate, 0, tier);
+
+ if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(baseline_average)) {
+ // this means no data for the baseline window, but we may have data for the highlighted one - assume zero
+ baseline_average = 0.0;
+ }
+
+ stats->db_queries++;
+ NETDATA_DOUBLE highlight_average = NAN;
+ NETDATA_DOUBLE high_anomaly_rate = 0;
+ value_is_null = 1;
+ ret = rrdset2value_api_v1(st, NULL, &highlight_average, d->id, 1,
+ after, before,
+ group, group_options, group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, &high_anomaly_rate, 0, tier);
+
+ if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(highlight_average)) {
+ // this means no data for the highlighted duration - so skip it
+ continue;
+ }
+
+ if(baseline_average == highlight_average) {
+ // they are the same - let's move on
+ continue;
+ }
+
+ stats->db_queries++;
+ NETDATA_DOUBLE highlight_countif = NAN;
+ value_is_null = 1;
+
+ char highlighted_countif_options[50 + 1];
+ snprintfz(highlighted_countif_options, 50, "%s" NETDATA_DOUBLE_FORMAT, highlight_average < baseline_average ? "<":">", baseline_average);
+
+ ret = rrdset2value_api_v1(st, NULL, &highlight_countif, d->id, 1,
+ after, before,
+ RRDR_GROUPING_COUNTIF,highlighted_countif_options,
+ group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, NULL, 0, tier);
+
+ if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(highlight_countif)) {
+ info("MC: highlighted countif query failed, but highlighted average worked - strange...");
+ continue;
+ }
+
+ // this represents the percentage of time
+ // the highlighted window was above/below the baseline window
+ // (above or below depending on their averages)
+ highlight_countif = highlight_countif / 100.0; // countif returns 0 - 100.0
+
+ RESULT_FLAGS flags;
+ NETDATA_DOUBLE pcent = NAN;
+ if(isgreater(baseline_average, 0.0) || isless(baseline_average, 0.0)) {
+ flags = RESULT_IS_BASE_HIGH_RATIO;
+ pcent = (highlight_average - baseline_average) / baseline_average * highlight_countif;
+ }
+ else {
+ flags = RESULT_IS_PERCENTAGE_OF_TIME;
+ pcent = highlight_countif;
+ }
+
+ register_result(results, st, d, pcent, flags, stats, register_zero);
+ }
+
+ return examined_dimensions;
+}
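+
+// Scoring sketch with assumed numbers: if the baseline averages 100, the
+// highlighted window averages 150, and the highlighted window was above the
+// baseline average for 80% of its duration (countif = 0.8), the registered
+// weight is
+//
+//     (150 - 100) / 100 * 0.8 = 0.4
+//
+// so both the relative size of the change and how consistently it held
+// contribute to the score.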
+
+// ----------------------------------------------------------------------------
+// ANOMALY RATE algorithm functions
+
+static int rrdset_weights_anomaly_rate(RRDSET *st, DICTIONARY *results,
+ long long after, long long before,
+ RRDR_OPTIONS options, RRDR_GROUPING group, const char *group_options,
+ int tier, int timeout,
+ WEIGHTS_STATS *stats, bool register_zero) {
+
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ANOMALY_BIT | RRDR_OPTION_NATURAL_POINTS;
+ long group_time = 0;
+
+ int examined_dimensions = 0;
+ int ret, value_is_null;
+ usec_t started_usec = now_realtime_usec();
+
+ RRDDIM *d;
+ for(d = st->dimensions; d ; d = d->next) {
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ return examined_dimensions;
+
+ // we count how many metrics we evaluated
+ examined_dimensions++;
+
+        // there is no point in passing a timeout to these queries,
+        // since the query engine checks for a timeout between
+        // dimensions, and we query a single dimension at a time.
+
+ stats->db_queries++;
+ NETDATA_DOUBLE average = NAN;
+ NETDATA_DOUBLE anomaly_rate = 0;
+ value_is_null = 1;
+ ret = rrdset2value_api_v1(st, NULL, &average, d->id, 1,
+ after, before,
+ group, group_options, group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, &anomaly_rate, 0, tier);
+
+        if(ret == HTTP_RESP_OK && !value_is_null && netdata_double_isnumber(average))
+ register_result(results, st, d, average, 0, stats, register_zero);
+ }
+
+ return examined_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+
+int compare_netdata_doubles(const void *left, const void *right) {
+ NETDATA_DOUBLE lt = *(NETDATA_DOUBLE *)left;
+ NETDATA_DOUBLE rt = *(NETDATA_DOUBLE *)right;
+
+ // https://stackoverflow.com/a/3886497/1114110
+ return (lt > rt) - (lt < rt);
+}
+
+static inline int binary_search_bigger_than_netdata_double(const NETDATA_DOUBLE arr[], int left, int size, NETDATA_DOUBLE K) {
+    // binary search to find the smallest index of the first value
+    // in the array that is greater than K
+
+ int right = size;
+ while(left < right) {
+ int middle = (int)(((unsigned int)(left + right)) >> 1);
+
+ if(arr[middle] > K)
+ right = middle;
+
+ else
+ left = middle + 1;
+ }
+
+ return left;
+}
+
+// ----------------------------------------------------------------------------
+// spread the results evenly according to their value
+
+static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
+ struct register_result *t;
+
+ // count the dimensions
+ size_t dimensions = dictionary_stats_entries(results);
+ if(!dimensions) return 0;
+
+ if(stats->max_base_high_ratio == 0.0)
+ stats->max_base_high_ratio = 1.0;
+
+ // create an array of the right size and copy all the values in it
+ NETDATA_DOUBLE slots[dimensions];
+ dimensions = 0;
+ dfe_start_read(results, t) {
+ if(t->flags & (RESULT_IS_PERCENTAGE_OF_TIME))
+ t->value = t->value * stats->max_base_high_ratio;
+
+ slots[dimensions++] = t->value;
+ }
+ dfe_done(t);
+
+ // sort the array with the values of all dimensions
+ qsort(slots, dimensions, sizeof(NETDATA_DOUBLE), compare_netdata_doubles);
+
+ // skip the duplicates in the sorted array
+ NETDATA_DOUBLE last_value = NAN;
+ size_t unique_values = 0;
+ for(size_t i = 0; i < dimensions ;i++) {
+ if(likely(slots[i] != last_value))
+ slots[unique_values++] = last_value = slots[i];
+ }
+
+ // this cannot happen, but coverity thinks otherwise...
+ if(!unique_values)
+ unique_values = dimensions;
+
+ // calculate the weight of each slot, using the number of unique values
+ NETDATA_DOUBLE slot_weight = 1.0 / (NETDATA_DOUBLE)unique_values;
+
+ dfe_start_read(results, t) {
+ int slot = binary_search_bigger_than_netdata_double(slots, 0, (int)unique_values, t->value);
+ NETDATA_DOUBLE v = slot * slot_weight;
+ if(unlikely(v > 1.0)) v = 1.0;
+ v = 1.0 - v;
+ t->value = v;
+ }
+ dfe_done(t);
+
+ return dimensions;
+}
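+
+// Example of the evening step (raw weights assumed): values { 0.2, 0.2, 0.9 }
+// contain 2 unique values, so slot_weight = 1.0 / 2 = 0.5. The 0.2 entries
+// fall in slot 1 and become 1.0 - (1 * 0.5) = 0.5, while the 0.9 entry falls
+// in slot 2 and becomes 1.0 - (2 * 0.5) = 0.0. Ties always share a slot, so
+// equal raw weights are guaranteed to get equal final weights.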
+
+// ----------------------------------------------------------------------------
+// The main function
+
+int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+ RRDR_GROUPING group, const char *group_options,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ long long points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, int tier, int timeout) {
+ WEIGHTS_STATS stats = {};
+
+ DICTIONARY *results = register_result_init();
+    DICTIONARY *charts = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE);
+ char *error = NULL;
+ int resp = HTTP_RESP_OK;
+
+ // if the user didn't give a timeout
+ // assume 60 seconds
+ if(!timeout)
+ timeout = 60 * MSEC_PER_SEC;
+
+ // if the timeout is less than 1 second
+ // make it at least 1 second
+ if(timeout < (long)(1 * MSEC_PER_SEC))
+ timeout = 1 * MSEC_PER_SEC;
+
+ usec_t timeout_usec = timeout * USEC_PER_MS;
+ usec_t started_usec = now_realtime_usec();
+
+ if(!rrdr_relative_window_to_absolute(&after, &before))
+ buffer_no_cacheable(wb);
+
+ if (before <= after) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Invalid selected time-range.";
+ goto cleanup;
+ }
+
+ uint32_t shifts = 0;
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ if(!points) points = 500;
+
+ if(baseline_before <= API_RELATIVE_TIME_MAX)
+ baseline_before += after;
+
+ rrdr_relative_window_to_absolute(&baseline_after, &baseline_before);
+
+ if (baseline_before <= baseline_after) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Invalid baseline time-range.";
+ goto cleanup;
+ }
+
+ // baseline should be a power of two multiple of highlight
+ long long base_delta = baseline_before - baseline_after;
+ long long high_delta = before - after;
+ uint32_t multiplier = (uint32_t)round((double)base_delta / (double)high_delta);
+
+ // check if the multiplier is a power of two
+ // https://stackoverflow.com/a/600306/1114110
+ if((multiplier & (multiplier - 1)) != 0) {
+            // it is not a power of two -
+            // round it up to the next power of two
+ // https://stackoverflow.com/a/466242/1114110
+ multiplier--;
+ multiplier |= multiplier >> 1;
+ multiplier |= multiplier >> 2;
+ multiplier |= multiplier >> 4;
+ multiplier |= multiplier >> 8;
+ multiplier |= multiplier >> 16;
+ multiplier++;
+ }
+
+        // convert the multiplier to the number of shifts
+        // needed to scale the baseline numbers down to
+        // match the highlight ones
+ while(multiplier > 1) {
+ shifts++;
+ multiplier = multiplier >> 1;
+ }
+
+        // if the baseline size does not comply with MAX_POINTS,
+        // lower the window of the baseline
+ while(shifts && (points << shifts) > MAX_POINTS)
+ shifts--;
+
+        // if the baseline size still does not comply with MAX_POINTS,
+        // lower the resolution of the highlight and the baseline
+ while((points << shifts) > MAX_POINTS)
+ points = points >> 1;
+
+ if(points < 15) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Too few points available, at least 15 are needed.";
+ goto cleanup;
+ }
+
+ // adjust the baseline to be multiplier times bigger than the highlight
+ baseline_after = baseline_before - (high_delta << shifts);
+ }
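+
+    // Worked example with assumed windows: a 60s highlight against a 270s
+    // baseline gives multiplier = round(270 / 60) = 5, which is rounded up
+    // to the next power of two (8), so shifts = 3 and the baseline window is
+    // re-adjusted to 60 << 3 = 480 seconds, ending at baseline_before.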
+
+    // don't keep the host locked while waiting for query results -
+    // collect the charts first and run the correlations after
+ RRDSET *st;
+ rrdhost_rdlock(host);
+ rrdset_foreach_read(st, host) {
+ if (rrdset_is_available_for_viewers(st)) {
+ if(!contexts || simple_pattern_matches(contexts, st->context))
+ dictionary_set(charts, st->name, NULL, 0);
+ }
+ }
+ rrdhost_unlock(host);
+
+ size_t examined_dimensions = 0;
+ void *ptr;
+
+ bool register_zero = true;
+ if(options & RRDR_OPTION_NONZERO) {
+ register_zero = false;
+ options &= ~RRDR_OPTION_NONZERO;
+ }
+
+ // for every chart in the dictionary
+ dfe_start_read(charts, ptr) {
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout_usec) {
+ error = "timed out";
+ resp = HTTP_RESP_GATEWAY_TIMEOUT;
+ goto cleanup;
+ }
+
+ st = rrdset_find_byname(host, ptr_name);
+ if(!st) continue;
+
+ rrdset_rdlock(st);
+
+ switch(method) {
+ case WEIGHTS_METHOD_ANOMALY_RATE:
+ options |= RRDR_OPTION_ANOMALY_BIT;
+ points = 1;
+ examined_dimensions += rrdset_weights_anomaly_rate(st, results,
+ after, before,
+ options, group, group_options, tier,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
+ &stats, register_zero);
+ break;
+
+ case WEIGHTS_METHOD_MC_VOLUME:
+ points = 1;
+ examined_dimensions += rrdset_metric_correlations_volume(st, results,
+ baseline_after, baseline_before,
+ after, before,
+ options, group, group_options, tier,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
+ &stats, register_zero);
+ break;
+
+ default:
+ case WEIGHTS_METHOD_MC_KS2:
+ examined_dimensions += rrdset_metric_correlations_ks2(st, results,
+ baseline_after, baseline_before,
+ after, before,
+ points, options, group, group_options, tier, shifts,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
+ &stats, register_zero);
+ break;
+ }
+
+ rrdset_unlock(st);
+ }
+ dfe_done(ptr);
+
+ if(!register_zero)
+ options |= RRDR_OPTION_NONZERO;
+
+ if(!(options & RRDR_OPTION_RETURN_RAW))
+ spread_results_evenly(results, &stats);
+
+ usec_t ended_usec = now_realtime_usec();
+
+ // generate the json output we need
+ buffer_flush(wb);
+
+ size_t added_dimensions = 0;
+ switch(format) {
+ case WEIGHTS_FORMAT_CHARTS:
+ added_dimensions = registered_results_to_json_charts(results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
+ break;
+
+ default:
+ case WEIGHTS_FORMAT_CONTEXTS:
+ added_dimensions = registered_results_to_json_contexts(results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
+ break;
+ }
+
+ if(!added_dimensions) {
+ error = "no results produced.";
+ resp = HTTP_RESP_NOT_FOUND;
+ }
+
+cleanup:
+ if(charts) dictionary_destroy(charts);
+ if(results) register_result_destroy(results);
+
+ if(error) {
+ buffer_flush(wb);
+ buffer_sprintf(wb, "{\"error\": \"%s\" }", error);
+ }
+
+ return resp;
+}
+
+// ----------------------------------------------------------------------------
+// unittest
+
+/*
+
+Unit tests against the output of this:
+
+https://github.com/scipy/scipy/blob/4cf21e753cf937d1c6c2d2a0e372fbc1dbbeea81/scipy/stats/_stats_py.py#L7275-L7449
+
+import matplotlib.pyplot as plt
+import pandas as pd
+import numpy as np
+import scipy as sp
+from scipy import stats
+
+data1 = np.array([ 1111, -2222, 33, 100, 100, 15555, -1, 19999, 888, 755, -1, -730 ])
+data2 = np.array([365, -123, 0])
+data1 = np.sort(data1)
+data2 = np.sort(data2)
+n1 = data1.shape[0]
+n2 = data2.shape[0]
+data_all = np.concatenate([data1, data2])
+cdf1 = np.searchsorted(data1, data_all, side='right') / n1
+cdf2 = np.searchsorted(data2, data_all, side='right') / n2
+print(data_all)
+print("\ndata1", data1, cdf1)
+print("\ndata2", data2, cdf2)
+cddiffs = cdf1 - cdf2
+print("\ncddiffs", cddiffs)
+minS = np.clip(-np.min(cddiffs), 0, 1)
+maxS = np.max(cddiffs)
+print("\nmin", minS)
+print("max", maxS)
+m, n = sorted([float(n1), float(n2)], reverse=True)
+en = m * n / (m + n)
+d = max(minS, maxS)
+prob = stats.distributions.kstwo.sf(d, np.round(en))
+print("\nprob", prob)
+
+*/
+
+static int double_expect(double v, const char *str, const char *descr) {
+ char buf[100 + 1];
+ snprintfz(buf, 100, "%0.6f", v);
+ int ret = strcmp(buf, str) ? 1 : 0;
+
+ fprintf(stderr, "%s %s, expected %s, got %s\n", ret?"FAILED":"OK", descr, str, buf);
+ return ret;
+}
+
+static int mc_unittest1(void) {
+ int bs = 3, hs = 3;
+ DIFFS_NUMBERS base[3] = { 1, 2, 3 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 0);
+ return double_expect(prob, "0.222222", "3x3");
+}
+
+static int mc_unittest2(void) {
+ int bs = 6, hs = 3;
+ DIFFS_NUMBERS base[6] = { 1, 2, 3, 10, 10, 15 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 1);
+ return double_expect(prob, "0.500000", "6x3");
+}
+
+static int mc_unittest3(void) {
+ int bs = 12, hs = 3;
+ DIFFS_NUMBERS base[12] = { 1, 2, 3, 10, 10, 15, 111, 19999, 8, 55, -1, -73 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 2);
+ return double_expect(prob, "0.347222", "12x3");
+}
+
+static int mc_unittest4(void) {
+ int bs = 12, hs = 3;
+ DIFFS_NUMBERS base[12] = { 1111, -2222, 33, 100, 100, 15555, -1, 19999, 888, 755, -1, -730 };
+ DIFFS_NUMBERS high[3] = { 365, -123, 0 };
+
+ double prob = ks_2samp(base, bs, high, hs, 2);
+ return double_expect(prob, "0.777778", "12x3");
+}
+
+int mc_unittest(void) {
+ int errors = 0;
+
+ errors += mc_unittest1();
+ errors += mc_unittest2();
+ errors += mc_unittest3();
+ errors += mc_unittest4();
+
+ return errors;
+}
+
diff --git a/web/api/queries/weights.h b/web/api/queries/weights.h
new file mode 100644
index 000000000..f88a134f2
--- /dev/null
+++ b/web/api/queries/weights.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_WEIGHTS_H
+#define NETDATA_API_WEIGHTS_H 1
+
+#include "query.h"
+
+typedef enum {
+ WEIGHTS_METHOD_MC_KS2 = 1,
+ WEIGHTS_METHOD_MC_VOLUME = 2,
+ WEIGHTS_METHOD_ANOMALY_RATE = 3,
+} WEIGHTS_METHOD;
+
+typedef enum {
+ WEIGHTS_FORMAT_CHARTS = 1,
+ WEIGHTS_FORMAT_CONTEXTS = 2,
+} WEIGHTS_FORMAT;
+
+extern int enable_metric_correlations;
+extern int metric_correlations_version;
+extern WEIGHTS_METHOD default_metric_correlations_method;
+
+extern int web_api_v1_weights (RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+ RRDR_GROUPING group, const char *group_options,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ long long points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, int tier, int timeout);
+
+extern WEIGHTS_METHOD weights_string_to_method(const char *method);
+extern const char *weights_method_to_string(WEIGHTS_METHOD method);
+extern int mc_unittest(void);
+
+#endif //NETDATA_API_WEIGHTS_H
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
index cb73f7c02..8bfc617fd 100644
--- a/web/api/web_api_v1.c
+++ b/web/api/web_api_v1.c
@@ -3,42 +3,45 @@
#include "web_api_v1.h"
char *api_secret;
-extern int aclk_use_new_cloud_arch;
static struct {
const char *name;
uint32_t hash;
RRDR_OPTIONS value;
} api_v1_data_options[] = {
- { "nonzero" , 0 , RRDR_OPTION_NONZERO}
- , {"flip" , 0 , RRDR_OPTION_REVERSED}
- , {"reversed" , 0 , RRDR_OPTION_REVERSED}
- , {"reverse" , 0 , RRDR_OPTION_REVERSED}
- , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP}
- , {"min2max" , 0 , RRDR_OPTION_MIN2MAX}
- , {"ms" , 0 , RRDR_OPTION_MILLISECONDS}
- , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS}
- , {"abs" , 0 , RRDR_OPTION_ABSOLUTE}
- , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE}
- , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE}
- , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE}
- , {"display_absolute", 0 , RRDR_OPTION_DISPLAY_ABS}
- , {"display-absolute", 0 , RRDR_OPTION_DISPLAY_ABS}
- , {"seconds" , 0 , RRDR_OPTION_SECONDS}
- , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO}
- , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS}
- , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON}
- , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON}
- , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE}
- , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED}
- , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS}
- , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS}
- , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES}
- , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES}
- , {"showcustomvars" , 0 , RRDR_OPTION_CUSTOM_VARS}
- , {"allow_past" , 0 , RRDR_OPTION_ALLOW_PAST}
- , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT}
- , { NULL, 0, 0}
+ { "nonzero" , 0 , RRDR_OPTION_NONZERO}
+ , {"flip" , 0 , RRDR_OPTION_REVERSED}
+ , {"reversed" , 0 , RRDR_OPTION_REVERSED}
+ , {"reverse" , 0 , RRDR_OPTION_REVERSED}
+ , {"jsonwrap" , 0 , RRDR_OPTION_JSON_WRAP}
+ , {"min2max" , 0 , RRDR_OPTION_MIN2MAX}
+ , {"ms" , 0 , RRDR_OPTION_MILLISECONDS}
+ , {"milliseconds" , 0 , RRDR_OPTION_MILLISECONDS}
+ , {"abs" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute_sum" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"absolute-sum" , 0 , RRDR_OPTION_ABSOLUTE}
+ , {"display_absolute" , 0 , RRDR_OPTION_DISPLAY_ABS}
+ , {"display-absolute" , 0 , RRDR_OPTION_DISPLAY_ABS}
+ , {"seconds" , 0 , RRDR_OPTION_SECONDS}
+ , {"null2zero" , 0 , RRDR_OPTION_NULL2ZERO}
+ , {"objectrows" , 0 , RRDR_OPTION_OBJECTSROWS}
+ , {"google_json" , 0 , RRDR_OPTION_GOOGLE_JSON}
+ , {"google-json" , 0 , RRDR_OPTION_GOOGLE_JSON}
+ , {"percentage" , 0 , RRDR_OPTION_PERCENTAGE}
+ , {"unaligned" , 0 , RRDR_OPTION_NOT_ALIGNED}
+ , {"match_ids" , 0 , RRDR_OPTION_MATCH_IDS}
+ , {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS}
+ , {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES}
+ , {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES}
+ , {"showcustomvars" , 0 , RRDR_OPTION_CUSTOM_VARS}
+ , {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT}
+ , {"selected-tier" , 0 , RRDR_OPTION_SELECTED_TIER}
+ , {"raw" , 0 , RRDR_OPTION_RETURN_RAW}
+ , {"jw-anomaly-rates" , 0 , RRDR_OPTION_RETURN_JWAR}
+ , {"natural-points" , 0 , RRDR_OPTION_NATURAL_POINTS}
+ , {"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS}
+ , {NULL , 0 , 0}
};
static struct {
@@ -162,8 +165,8 @@ void web_client_api_v1_management_init(void) {
api_secret = get_mgmt_api_key();
}
-inline uint32_t web_client_api_request_v1_data_options(char *o) {
- uint32_t ret = 0x00000000;
+inline RRDR_OPTIONS web_client_api_request_v1_data_options(char *o) {
+ RRDR_OPTIONS ret = 0x00000000;
char *tok;
while(o && *o && (tok = mystrsep(&o, ", |"))) {
@@ -182,6 +185,19 @@ inline uint32_t web_client_api_request_v1_data_options(char *o) {
return ret;
}
+void web_client_api_request_v1_data_options_to_string(BUFFER *wb, RRDR_OPTIONS options) {
+ RRDR_OPTIONS used = 0; // to prevent adding duplicates
+ int added = 0;
+ for(int i = 0; api_v1_data_options[i].name ; i++) {
+ if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
+ if(added) buffer_strcat(wb, ",");
+ buffer_strcat(wb, api_v1_data_options[i].name);
+ used |= api_v1_data_options[i].value;
+ added++;
+ }
+ }
+}
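+
+// Example (flags assumed): for options = RRDR_OPTION_NONZERO | RRDR_OPTION_NOT_ALIGNED
+// this emits "nonzero,unaligned". Only the first registered name of each flag
+// is printed, and `used` suppresses aliases such as "reversed" / "reverse".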
+
inline uint32_t web_client_api_request_v1_data_format(char *name) {
uint32_t hash = simple_hash(name);
int i;
@@ -358,6 +374,157 @@ inline int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_c
return web_client_api_request_single_chart(host, w, url, health_api_v1_chart_variables2json);
}
+static RRDCONTEXT_TO_JSON_OPTIONS rrdcontext_to_json_parse_options(char *o) {
+ RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
+ char *tok;
+
+ while(o && *o && (tok = mystrsep(&o, ", |"))) {
+ if(!*tok) continue;
+
+ if(!strcmp(tok, "full") || !strcmp(tok, "all"))
+ options |= RRDCONTEXT_OPTIONS_ALL;
+ else if(!strcmp(tok, "charts") || !strcmp(tok, "instances"))
+ options |= RRDCONTEXT_OPTION_SHOW_INSTANCES;
+ else if(!strcmp(tok, "dimensions") || !strcmp(tok, "metrics"))
+ options |= RRDCONTEXT_OPTION_SHOW_METRICS;
+ else if(!strcmp(tok, "queue"))
+ options |= RRDCONTEXT_OPTION_SHOW_QUEUED;
+ else if(!strcmp(tok, "flags"))
+ options |= RRDCONTEXT_OPTION_SHOW_FLAGS;
+ else if(!strcmp(tok, "uuids"))
+ options |= RRDCONTEXT_OPTION_SHOW_UUIDS;
+ else if(!strcmp(tok, "deleted"))
+ options |= RRDCONTEXT_OPTION_SHOW_DELETED;
+ else if(!strcmp(tok, "labels"))
+ options |= RRDCONTEXT_OPTION_SHOW_LABELS;
+ else if(!strcmp(tok, "deepscan"))
+ options |= RRDCONTEXT_OPTION_DEEPSCAN;
+ else if(!strcmp(tok, "hidden"))
+ options |= RRDCONTEXT_OPTION_SHOW_HIDDEN;
+ }
+
+ return options;
+}
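+
+// Example (tokens assumed): options = "dimensions,labels" maps to
+// RRDCONTEXT_OPTION_SHOW_METRICS | RRDCONTEXT_OPTION_SHOW_LABELS;
+// unknown tokens are silently ignored.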
+
+static int web_client_api_request_v1_context(RRDHOST *host, struct web_client *w, char *url) {
+ char *context = NULL;
+ RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
+ time_t after = 0, before = 0;
+ const char *chart_label_key = NULL, *chart_labels_filter = NULL;
+ BUFFER *dimensions = NULL;
+
+ buffer_flush(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "context") || !strcmp(name, "ctx")) context = value;
+ else if(!strcmp(name, "after")) after = str2l(value);
+ else if(!strcmp(name, "before")) before = str2l(value);
+ else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value);
+ else if(!strcmp(name, "chart_label_key")) chart_label_key = value;
+ else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions) dimensions = buffer_create(100);
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ }
+
+ if(!context || !*context) {
+ buffer_sprintf(w->response.data, "No context is given at the request.");
+ return HTTP_RESP_BAD_REQUEST;
+ }
+
+ SIMPLE_PATTERN *chart_label_key_pattern = NULL;
+ SIMPLE_PATTERN *chart_labels_filter_pattern = NULL;
+ SIMPLE_PATTERN *chart_dimensions_pattern = NULL;
+
+ if(chart_label_key)
+ chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(chart_labels_filter)
+ chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(dimensions) {
+ chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ buffer_free(dimensions);
+ }
+
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ int ret = rrdcontext_to_json(host, w->response.data, after, before, options, context, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern);
+
+ simple_pattern_free(chart_label_key_pattern);
+ simple_pattern_free(chart_labels_filter_pattern);
+ simple_pattern_free(chart_dimensions_pattern);
+
+ return ret;
+}
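+
+// Illustrative request (URL and context name assumed, based on this
+// handler's parameter parsing): /api/v1/context?context=system.cpu&options=dimensions,labels
+// would return the JSON description of the system.cpu context, including
+// its metrics and labels.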
+
+static int web_client_api_request_v1_contexts(RRDHOST *host, struct web_client *w, char *url) {
+ RRDCONTEXT_TO_JSON_OPTIONS options = RRDCONTEXT_OPTION_NONE;
+ time_t after = 0, before = 0;
+ const char *chart_label_key = NULL, *chart_labels_filter = NULL;
+ BUFFER *dimensions = NULL;
+
+ buffer_flush(w->response.data);
+
+ while(url) {
+ char *value = mystrsep(&url, "&");
+ if(!value || !*value) continue;
+
+ char *name = mystrsep(&value, "=");
+ if(!name || !*name) continue;
+ if(!value || !*value) continue;
+
+ // name and value are now the parameters
+ // they are not null and not empty
+
+ if(!strcmp(name, "after")) after = str2l(value);
+ else if(!strcmp(name, "before")) before = str2l(value);
+ else if(!strcmp(name, "options")) options = rrdcontext_to_json_parse_options(value);
+ else if(!strcmp(name, "chart_label_key")) chart_label_key = value;
+ else if(!strcmp(name, "chart_labels_filter")) chart_labels_filter = value;
+ else if(!strcmp(name, "dimension") || !strcmp(name, "dim") || !strcmp(name, "dimensions") || !strcmp(name, "dims")) {
+ if(!dimensions) dimensions = buffer_create(100);
+ buffer_strcat(dimensions, "|");
+ buffer_strcat(dimensions, value);
+ }
+ }
+
+ SIMPLE_PATTERN *chart_label_key_pattern = NULL;
+ SIMPLE_PATTERN *chart_labels_filter_pattern = NULL;
+ SIMPLE_PATTERN *chart_dimensions_pattern = NULL;
+
+ if(chart_label_key)
+ chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(chart_labels_filter)
+ chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ if(dimensions) {
+ chart_dimensions_pattern = simple_pattern_create(buffer_tostring(dimensions), ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+ buffer_free(dimensions);
+ }
+
+ w->response.data->contenttype = CT_APPLICATION_JSON;
+ int ret = rrdcontexts_to_json(host, w->response.data, after, before, options, chart_label_key_pattern, chart_labels_filter_pattern, chart_dimensions_pattern);
+
+ simple_pattern_free(chart_label_key_pattern);
+ simple_pattern_free(chart_labels_filter_pattern);
+ simple_pattern_free(chart_dimensions_pattern);
+
+ return ret;
+}
+
inline int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url) {
(void)url;
@@ -392,6 +559,7 @@ void fix_google_param(char *s) {
}
}
+
// returns the HTTP code
inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url) {
debug(D_WEB_CLIENT, "%llu: API v1 data with URL '%s'", w->id, url);
@@ -420,7 +588,8 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
char *context = NULL;
char *chart_label_key = NULL;
char *chart_labels_filter = NULL;
-
+ char *group_options = NULL;
+ int tier = 0;
int group = RRDR_GROUPING_AVERAGE;
int show_dimensions = 0;
uint32_t format = DATASOURCE_JSON;
@@ -454,6 +623,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
else if(!strcmp(name, "points")) points_str = value;
else if(!strcmp(name, "timeout")) timeout_str = value;
else if(!strcmp(name, "gtime")) group_time_str = value;
+ else if(!strcmp(name, "group_options")) group_options = value;
else if(!strcmp(name, "group")) {
group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
}
@@ -503,6 +673,11 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
else if(!strcmp(name, "max_anomaly_rates")) {
max_anomaly_rates_str = value;
}
+ else if(!strcmp(name, "tier")) {
+ tier = str2i(value);
+ if(tier >= 0 && tier < storage_tiers)
+ options |= RRDR_OPTION_SELECTED_TIER;
+ }
}
// validate the google parameters given
@@ -528,18 +703,23 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
uint32_t context_hash = simple_hash(context);
+ SIMPLE_PATTERN *chart_label_key_pattern = NULL;
+ if(chart_label_key)
+ chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
+ SIMPLE_PATTERN *chart_labels_filter_pattern = NULL;
+ if(chart_labels_filter)
+ chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
+
rrdhost_rdlock(host);
- char *words[MAX_CHART_LABELS_FILTER];
- uint32_t hash_key_list[MAX_CHART_LABELS_FILTER];
- int word_count = 0;
rrdset_foreach_read(st1, host) {
if (st1->hash_context == context_hash && !strcmp(st1->context, context) &&
- (!chart_label_key || rrdset_contains_label_keylist(st1, chart_label_key)) &&
- (!chart_labels_filter ||
- rrdset_matches_label_keys(st1, chart_labels_filter, words, hash_key_list, &word_count, MAX_CHART_LABELS_FILTER)))
+ (!chart_label_key_pattern || rrdlabels_match_simple_pattern_parsed(st1->state->chart_labels, chart_label_key_pattern, ':')) &&
+ (!chart_labels_filter_pattern || rrdlabels_match_simple_pattern_parsed(st1->state->chart_labels, chart_labels_filter_pattern, ':')))
build_context_param_list(owa, &context_param_list, st1);
}
rrdhost_unlock(host);
+
if (likely(context_param_list && context_param_list->rd)) // Just set the first one
st = context_param_list->rd->rrdset;
else {
@@ -656,7 +836,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
.wb = w->response.data};
ret = rrdset2anything_api_v1(owa, st, &query_params, dimensions, format,
- points, after, before, group, group_time, options, &last_timestamp_in_data);
+ points, after, before, group, group_options, group_time, options, &last_timestamp_in_data, tier);
free_context_param_list(owa, &context_param_list);
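
`rrdset2anything_api_v1()` now receives the raw `group_options` string and the selected `tier` in addition to the grouping method, so a single request such as `/api/v1/data?chart=system.cpu&group=percentile&group_options=95&tier=1` (illustrative) can parameterize the grouping function and pin the storage tier at once. What a grouping function does with `group_options` is up to it; the sketch below is a hypothetical numeric interpretation with clamping and a default, not Netdata's actual percentile code:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical use of the group_options string: parse it as a number,
     * clamp it to a sane range, fall back to a default when absent/invalid. */
    static double parse_percentile(const char *group_options, double def) {
        if(!group_options || !*group_options) return def;
        char *end = NULL;
        double v = strtod(group_options, &end);
        if(end == group_options) return def;   /* not a number */
        if(v < 0.0)   v = 0.0;
        if(v > 100.0) v = 100.0;
        return v;
    }

    int main(void) {
        printf("%.1f\n", parse_percentile("95", 50.0));   /* 95.0 */
        printf("%.1f\n", parse_percentile(NULL, 50.0));   /* 50.0 */
        printf("%.1f\n", parse_percentile("abc", 50.0));  /* 50.0 */
        return 0;
    }
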
@@ -903,8 +1083,6 @@ static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
buffer_strcat(wb, "\t\"mirrored_hosts\": [\n");
rrd_rdlock();
rrdhost_foreach_read(host) {
- if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))
- continue;
if (count > 0)
buffer_strcat(wb, ",\n");
@@ -916,8 +1094,6 @@ static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
count = 0;
rrdhost_foreach_read(host)
{
- if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))
- continue;
if (count > 0)
buffer_strcat(wb, ",\n");
@@ -964,22 +1140,8 @@ inline void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation) {
indentation--;
}
- int count = 0;
- rrdhost_rdlock(host);
- netdata_rwlock_rdlock(&host->labels.labels_rwlock);
- for (struct label *label = host->labels.head; label; label = label->next) {
- if(count > 0) buffer_strcat(wb, ",\n");
- buffer_strcat(wb, tabs);
-
- char value[CONFIG_MAX_VALUE * 2 + 1];
- sanitize_json_string(value, label->value, CONFIG_MAX_VALUE * 2);
- buffer_sprintf(wb, "\"%s\": \"%s\"", label->key, value);
-
- count++;
- }
+ rrdlabels_to_buffer(host->host_labels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
buffer_strcat(wb, "\n");
- netdata_rwlock_unlock(&host->labels.labels_rwlock);
- rrdhost_unlock(host);
}
extern int aclk_connected;
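
The hand-rolled label loop, with its own locking and JSON escaping, collapses into a single `rrdlabels_to_buffer()` call that receives the indentation, separator, quoting and joining strings as arguments. The stand-in below mirrors only that "join decorated key/value pairs" idea, writing to stdout with no locking or escaping; all names here are hypothetical:

    #include <stdio.h>

    struct toy_label { const char *key, *value; };

    /* Stand-in for rrdlabels_to_buffer(): emit every label as
     *   <indent><quote>key<quote><sep><quote>value<quote>
     * joined by <between>, mirroring the argument style of the call above. */
    static void toy_labels_print(const struct toy_label *labels, size_t n,
                                 const char *indent, const char *sep,
                                 const char *quote, const char *between) {
        for(size_t i = 0; i < n; i++)
            printf("%s%s%s%s%s%s%s%s%s",
                   i ? between : "", indent,
                   quote, labels[i].key, quote, sep,
                   quote, labels[i].value, quote);
    }

    int main(void) {
        struct toy_label labels[] = { { "_os", "linux" }, { "site", "eu-1" } };
        toy_labels_print(labels, 2, "\t", ":", "\"", ",\n");
        printf("\n");   /* \t"_os":"linux",\n\t"site":"eu-1" */
        return 0;
    }
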
@@ -1054,11 +1216,7 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
#ifdef ENABLE_ACLK
buffer_strcat(wb, "\t\"cloud-available\": true,\n");
buffer_strcat(wb, "\t\"aclk-ng-available\": true,\n");
-#ifdef ENABLE_NEW_CLOUD_PROTOCOL
buffer_strcat(wb, "\t\"aclk-ng-new-cloud-protocol\": true,\n");
-#else
- buffer_strcat(wb, "\t\"aclk-ng-new-cloud-protocol\": false,\n");
-#endif
buffer_strcat(wb, "\t\"aclk-legacy-available\": false,\n");
buffer_strcat(wb, "\t\"aclk-implementation\": \"Next Generation\",\n");
#else
@@ -1066,7 +1224,7 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
buffer_strcat(wb, "\t\"aclk-ng-available\": false,\n");
buffer_strcat(wb, "\t\"aclk-legacy-available\": false,\n");
#endif
- char *agent_id = is_agent_claimed();
+ char *agent_id = get_agent_claimid();
if (agent_id == NULL)
buffer_strcat(wb, "\t\"agent-claimed\": false,\n");
else {
@@ -1076,12 +1234,7 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
#ifdef ENABLE_ACLK
if (aclk_connected) {
buffer_strcat(wb, "\t\"aclk-available\": true,\n");
-#ifdef ENABLE_NEW_CLOUD_PROTOCOL
- if (aclk_use_new_cloud_arch)
- buffer_strcat(wb, "\t\"aclk-available-protocol\": \"New\",\n");
- else
-#endif
- buffer_strcat(wb, "\t\"aclk-available-protocol\": \"Legacy\",\n");
+ buffer_strcat(wb, "\t\"aclk-available-protocol\": \"New\",\n");
}
else
#endif
@@ -1323,12 +1476,18 @@ static int web_client_api_request_v1_aclk_state(RRDHOST *host, struct web_client
return HTTP_RESP_OK;
}
-int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) {
+static int web_client_api_request_v1_weights_internal(RRDHOST *host, struct web_client *w, char *url, WEIGHTS_METHOD method, WEIGHTS_FORMAT format) {
if (!netdata_ready)
return HTTP_RESP_BACKEND_FETCH_FAILED;
- long long baseline_after = 0, baseline_before = 0, highlight_after = 0, highlight_before = 0, max_points = 0;
-
+ long long baseline_after = 0, baseline_before = 0, after = 0, before = 0, points = 0;
+ RRDR_OPTIONS options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NONZERO | RRDR_OPTION_NULL2ZERO;
+ int options_count = 0;
+ RRDR_GROUPING group = RRDR_GROUPING_AVERAGE;
+ int timeout = 0;
+ int tier = 0;
+ const char *group_options = NULL, *contexts_str = NULL;
+
while (url) {
char *value = mystrsep(&url, "&");
if (!value || !*value)
@@ -1342,30 +1501,165 @@ int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_clie
if (!strcmp(name, "baseline_after"))
baseline_after = (long long) strtoul(value, NULL, 0);
+
else if (!strcmp(name, "baseline_before"))
baseline_before = (long long) strtoul(value, NULL, 0);
- else if (!strcmp(name, "highlight_after"))
- highlight_after = (long long) strtoul(value, NULL, 0);
- else if (!strcmp(name, "highlight_before"))
- highlight_before = (long long) strtoul(value, NULL, 0);
- else if (!strcmp(name, "max_points"))
- max_points = (long long) strtoul(value, NULL, 0);
-
+
+ else if (!strcmp(name, "after") || !strcmp(name, "highlight_after"))
+ after = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "before") || !strcmp(name, "highlight_before"))
+ before = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "points") || !strcmp(name, "max_points"))
+ points = (long long) strtoul(value, NULL, 0);
+
+ else if (!strcmp(name, "timeout"))
+ timeout = (int) strtoul(value, NULL, 0);
+
+ else if(!strcmp(name, "group"))
+ group = web_client_api_request_v1_data_group(value, RRDR_GROUPING_AVERAGE);
+
+ else if(!strcmp(name, "options")) {
+ if(!options_count) options = RRDR_OPTION_NOT_ALIGNED | RRDR_OPTION_NULL2ZERO;
+ options |= web_client_api_request_v1_data_options(value);
+ options_count++;
+ }
+
+ else if(!strcmp(name, "method"))
+ method = weights_string_to_method(value);
+
+ else if(!strcmp(name, "context") || !strcmp(name, "contexts"))
+ contexts_str = value;
+
+ else if(!strcmp(name, "tier")) {
+ tier = str2i(value);
+ if(tier >= 0 && tier < storage_tiers)
+ options |= RRDR_OPTION_SELECTED_TIER;
+ }
}
BUFFER *wb = w->response.data;
buffer_flush(wb);
wb->contenttype = CT_APPLICATION_JSON;
+
+ SIMPLE_PATTERN *contexts = (contexts_str) ? simple_pattern_create(contexts_str, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT) : NULL;
+
+ int ret = web_api_v1_weights(host, wb, method, format, group, group_options, baseline_after, baseline_before, after, before, points, options, contexts, tier, timeout);
+
+ simple_pattern_free(contexts);
+ return ret;
+}
+
+int web_client_api_request_v1_metric_correlations(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_v1_weights_internal(host, w, url, default_metric_correlations_method, WEIGHTS_FORMAT_CHARTS);
+}
+
+int web_client_api_request_v1_weights(RRDHOST *host, struct web_client *w, char *url) {
+ return web_client_api_request_v1_weights_internal(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS);
+}
+
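
Both public endpoints now funnel into the same internal handler and differ only in their defaults: `/api/v1/metric_correlations` keeps its charts-oriented output with the configured correlations method, while the new `/api/v1/weights` defaults to anomaly-rate scoring over contexts. Either endpoint can still override the method through the `method=` query parameter. A minimal sketch of this one-engine / per-endpoint-defaults pattern, with stand-in enum values and a trivialized query check:

    #include <stdio.h>
    #include <string.h>

    typedef enum { METHOD_KS2, METHOD_VOLUME, METHOD_ANOMALY_RATE } toy_method;
    typedef enum { FORMAT_CHARTS, FORMAT_CONTEXTS } toy_format;

    /* The shared engine: endpoint-specific defaults arrive as arguments,
     * and a "method=" query parameter may still override them. */
    static void toy_weights_internal(const char *query, toy_method method, toy_format format) {
        if(query && !strcmp(query, "method=ks2"))
            method = METHOD_KS2;
        printf("method=%d format=%d\n", method, format);
    }

    /* Two thin public wrappers, mirroring the two API endpoints above. */
    static void toy_metric_correlations(const char *query) {
        toy_weights_internal(query, METHOD_KS2, FORMAT_CHARTS);
    }
    static void toy_weights(const char *query) {
        toy_weights_internal(query, METHOD_ANOMALY_RATE, FORMAT_CONTEXTS);
    }

    int main(void) {
        toy_metric_correlations(NULL);   /* method=0 format=0 */
        toy_weights(NULL);               /* method=2 format=1 */
        toy_weights("method=ks2");       /* method=0 format=1 */
        return 0;
    }
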
+#ifndef ENABLE_DBENGINE
+int web_client_api_request_v1_dbengine_stats(RRDHOST *host, struct web_client *w, char *url) {
+ return HTTP_RESP_NOT_FOUND;
+}
+#else
+static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, int tier) {
+ RRDENG_SIZE_STATS stats = rrdeng_size_statistics(multidb_ctx[tier]);
+
+ buffer_sprintf(wb,
+ "\n\t\t\"default_granularity_secs\":%zu"
+ ",\n\t\t\"sizeof_metric\":%zu"
+ ",\n\t\t\"sizeof_metric_in_index\":%zu"
+ ",\n\t\t\"sizeof_page\":%zu"
+ ",\n\t\t\"sizeof_page_in_index\":%zu"
+ ",\n\t\t\"sizeof_extent\":%zu"
+ ",\n\t\t\"sizeof_page_in_extent\":%zu"
+ ",\n\t\t\"sizeof_datafile\":%zu"
+ ",\n\t\t\"sizeof_page_in_cache\":%zu"
+ ",\n\t\t\"sizeof_point_data\":%zu"
+ ",\n\t\t\"sizeof_page_data\":%zu"
+ ",\n\t\t\"pages_per_extent\":%zu"
+ ",\n\t\t\"datafiles\":%zu"
+ ",\n\t\t\"extents\":%zu"
+ ",\n\t\t\"extents_pages\":%zu"
+ ",\n\t\t\"points\":%zu"
+ ",\n\t\t\"metrics\":%zu"
+ ",\n\t\t\"metrics_pages\":%zu"
+ ",\n\t\t\"extents_compressed_bytes\":%zu"
+ ",\n\t\t\"pages_uncompressed_bytes\":%zu"
+ ",\n\t\t\"pages_duration_secs\":%ld"
+ ",\n\t\t\"single_point_pages\":%zu"
+ ",\n\t\t\"first_t\":%llu"
+ ",\n\t\t\"last_t\":%llu"
+ ",\n\t\t\"database_retention_secs\":%ld"
+ ",\n\t\t\"average_compression_savings\":%0.2f"
+ ",\n\t\t\"average_point_duration_secs\":%0.2f"
+ ",\n\t\t\"average_metric_retention_secs\":%0.2f"
+ ",\n\t\t\"ephemeral_metrics_per_day_percent\":%0.2f"
+ ",\n\t\t\"average_page_size_bytes\":%0.2f"
+ ",\n\t\t\"estimated_concurrently_collected_metrics\":%zu"
+ ",\n\t\t\"currently_collected_metrics\":%zu"
+ ",\n\t\t\"max_concurrently_collected_metrics\":%zu"
+ ",\n\t\t\"disk_space\":%zu"
+ ",\n\t\t\"max_disk_space\":%zu"
+ , stats.default_granularity_secs
+ , stats.sizeof_metric
+ , stats.sizeof_metric_in_index
+ , stats.sizeof_page
+ , stats.sizeof_page_in_index
+ , stats.sizeof_extent
+ , stats.sizeof_page_in_extent
+ , stats.sizeof_datafile
+ , stats.sizeof_page_in_cache
+ , stats.sizeof_point_data
+ , stats.sizeof_page_data
+ , stats.pages_per_extent
+ , stats.datafiles
+ , stats.extents
+ , stats.extents_pages
+ , stats.points
+ , stats.metrics
+ , stats.metrics_pages
+ , stats.extents_compressed_bytes
+ , stats.pages_uncompressed_bytes
+ , stats.pages_duration_secs
+ , stats.single_point_pages
+ , stats.first_t
+ , stats.last_t
+ , stats.database_retention_secs
+ , stats.average_compression_savings
+ , stats.average_point_duration_secs
+ , stats.average_metric_retention_secs
+ , stats.ephemeral_metrics_per_day_percent
+ , stats.average_page_size_bytes
+ , stats.estimated_concurrently_collected_metrics
+ , stats.currently_collected_metrics
+ , stats.max_concurrently_collected_metrics
+ , stats.disk_space
+ , stats.max_disk_space
+ );
+}
+int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struct web_client *w, char *url __maybe_unused) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
buffer_no_cacheable(wb);
- if (!highlight_after || !highlight_before)
- buffer_strcat(wb, "{\"error\": \"Missing or invalid required highlight after and before parameters.\" }");
- else {
- metric_correlations(host, wb, baseline_after, baseline_before, highlight_after, highlight_before, max_points);
+ buffer_strcat(wb, "{");
+ for(int tier = 0; tier < storage_tiers ;tier++) {
+ buffer_sprintf(wb, "%s\n\t\"tier%d\": {", tier?",":"", tier);
+ web_client_api_v1_dbengine_stats_for_tier(wb, tier);
+ buffer_strcat(wb, "\n\t}");
}
+ buffer_strcat(wb, "\n}");
return HTTP_RESP_OK;
}
+#endif
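
The per-tier loop keeps the emitted JSON valid with the `tier ? "," : ""` idiom: the separator is printed before every member except the first, so the object never carries a trailing comma no matter how many tiers are configured. The idiom in isolation, writing to stdout instead of a Netdata BUFFER:

    #include <stdio.h>

    /* Emit {"tier0":{...},"tier1":{...},...} without a trailing comma:
     * print the separator *before* every member except the first. */
    static void print_tier_map(int tiers) {
        printf("{");
        for(int tier = 0; tier < tiers; tier++)
            printf("%s\n\t\"tier%d\": { }", tier ? "," : "", tier);
        printf("\n}\n");
    }

    int main(void) {
        print_tier_map(3);
        return 0;
    }
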
static struct api_command {
const char *command;
@@ -1377,6 +1671,8 @@ static struct api_command {
{ "data", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_data },
{ "chart", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_chart },
{ "charts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_charts },
+ { "context", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_context },
+ { "contexts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_contexts },
{ "archivedcharts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_archivedcharts },
// registry checks the ACL by itself, so we allow everything
@@ -1401,6 +1697,10 @@ static struct api_command {
{ "manage/health", 0, WEB_CLIENT_ACL_MGMT, web_client_api_request_v1_mgmt_health },
{ "aclk", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_aclk_state },
{ "metric_correlations", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_metric_correlations },
+ { "weights", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_weights },
+
+ { "dbengine_stats", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_dbengine_stats },
+
// terminator
{ NULL, 0, WEB_CLIENT_ACL_NONE, NULL },
};
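
Registering `context`, `contexts`, `weights` and `dbengine_stats` in this table is all the wiring the new endpoints need: the dispatcher elsewhere in web_api_v1.c hashes the first URL path component, scans the table comparing hash and then string, checks the entry's ACL, and invokes the callback. A self-contained sketch of that table-scan dispatch, with a simplified stand-in hash and no ACL handling:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    typedef int (*toy_callback)(const char *url);

    struct toy_command {
        const char *command;
        uint32_t hash;            /* filled in lazily on first dispatch */
        toy_callback callback;
    };

    static uint32_t toy_hash(const char *s) {   /* stand-in for simple_hash() */
        uint32_t h = 5381;
        while(*s) h = (h * 33) ^ (uint8_t)*s++;
        return h;
    }

    static int toy_info(const char *url)    { (void)url; puts("info");    return 200; }
    static int toy_weights(const char *url) { (void)url; puts("weights"); return 200; }

    static struct toy_command commands[] = {
        { "info",    0, toy_info    },
        { "weights", 0, toy_weights },
        { NULL,      0, NULL        },   /* terminator, as in the table above */
    };

    static int toy_dispatch(const char *command, const char *url) {
        uint32_t hash = toy_hash(command);
        for(int i = 0; commands[i].command; i++) {
            if(!commands[i].hash) commands[i].hash = toy_hash(commands[i].command);
            if(commands[i].hash == hash && !strcmp(commands[i].command, command))
                return commands[i].callback(url);   /* the real code checks the ACL here */
        }
        return 404;
    }

    int main(void) {
        printf("%d\n", toy_dispatch("weights", ""));   /* 200 */
        printf("%d\n", toy_dispatch("nosuch",  ""));   /* 404 */
        return 0;
    }
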
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
index a88c511ad..544f1e574 100644
--- a/web/api/web_api_v1.h
+++ b/web/api/web_api_v1.h
@@ -7,9 +7,12 @@
#include "web/api/badges/web_buffer_svg.h"
#include "web/api/formatters/rrd2json.h"
#include "web/api/health/health_cmdapi.h"
+#include "web/api/queries/weights.h"
#define MAX_CHART_LABELS_FILTER (32)
-extern uint32_t web_client_api_request_v1_data_options(char *o);
+extern RRDR_OPTIONS web_client_api_request_v1_data_options(char *o);
+extern void web_client_api_request_v1_data_options_to_string(BUFFER *wb, RRDR_OPTIONS options);
+
extern uint32_t web_client_api_request_v1_data_format(char *name);
extern uint32_t web_client_api_request_v1_data_google_format(char *name);
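
The options parser is now typed as returning `RRDR_OPTIONS` rather than a bare `uint32_t`, and it gains an inverse: `web_client_api_request_v1_data_options_to_string()` renders an options bitmask back into text. A sketch of such a parse/render round trip over a small flag table; the flag names, bit values, and output format below are stand-ins, not the real RRDR_OPTION_* set:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    typedef uint32_t TOY_OPTIONS;
    #define TOY_OPTION_NONZERO   (1u << 0)
    #define TOY_OPTION_NULL2ZERO (1u << 1)
    #define TOY_OPTION_ABSOLUTE  (1u << 2)

    static const struct { const char *name; TOY_OPTIONS bit; } toy_flags[] = {
        { "nonzero",   TOY_OPTION_NONZERO   },
        { "null2zero", TOY_OPTION_NULL2ZERO },
        { "absolute",  TOY_OPTION_ABSOLUTE  },
    };
    #define TOY_FLAGS (sizeof(toy_flags) / sizeof(toy_flags[0]))

    /* parse "nonzero,absolute" -> bitmask (unknown names are ignored) */
    static TOY_OPTIONS toy_options_parse(char *o) {
        TOY_OPTIONS ret = 0;
        for(char *tok = strtok(o, ","); tok; tok = strtok(NULL, ","))
            for(size_t i = 0; i < TOY_FLAGS; i++)
                if(!strcmp(tok, toy_flags[i].name)) ret |= toy_flags[i].bit;
        return ret;
    }

    /* render the bitmask back to the comma-separated form the parser reads */
    static void toy_options_to_string(TOY_OPTIONS options) {
        int first = 1;
        for(size_t i = 0; i < TOY_FLAGS; i++)
            if(options & toy_flags[i].bit) {
                printf("%s%s", first ? "" : ",", toy_flags[i].name);
                first = 0;
            }
    }

    int main(void) {
        char in[] = "nonzero,absolute,bogus";
        TOY_OPTIONS o = toy_options_parse(in);
        toy_options_to_string(o);   /* prints: nonzero,absolute */
        printf("\n");
        return 0;
    }
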