Diffstat (limited to 'web/api/queries')
-rw-r--r--  web/api/queries/Makefile.am                        |    3
-rw-r--r--  web/api/queries/average/average.c                  |   14
-rw-r--r--  web/api/queries/average/average.h                  |    6
-rw-r--r--  web/api/queries/countif/Makefile.am                |    8
-rw-r--r--  web/api/queries/countif/README.md                  |   36
-rw-r--r--  web/api/queries/countif/countif.c                  |  136
-rw-r--r--  web/api/queries/countif/countif.h                  |   15
-rw-r--r--  web/api/queries/des/des.c                          |   36
-rw-r--r--  web/api/queries/des/des.h                          |    6
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.c  |   16
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.h  |    6
-rw-r--r--  web/api/queries/max/max.c                          |   16
-rw-r--r--  web/api/queries/max/max.h                          |    6
-rw-r--r--  web/api/queries/median/README.md                   |   17
-rw-r--r--  web/api/queries/median/median.c                    |  111
-rw-r--r--  web/api/queries/median/median.h                    |   14
-rw-r--r--  web/api/queries/min/min.c                          |   16
-rw-r--r--  web/api/queries/min/min.h                          |    6
-rw-r--r--  web/api/queries/percentile/Makefile.am             |    8
-rw-r--r--  web/api/queries/percentile/README.md               |   58
-rw-r--r--  web/api/queries/percentile/percentile.c            |  169
-rw-r--r--  web/api/queries/percentile/percentile.h            |   23
-rw-r--r--  web/api/queries/query.c                            | 2395
-rw-r--r--  web/api/queries/query.h                            |   35
-rw-r--r--  web/api/queries/rrdr.c                             |   90
-rw-r--r--  web/api/queries/rrdr.h                             |   92
-rw-r--r--  web/api/queries/ses/ses.c                          |   28
-rw-r--r--  web/api/queries/ses/ses.h                          |    6
-rw-r--r--  web/api/queries/stddev/stddev.c                    |   42
-rw-r--r--  web/api/queries/stddev/stddev.h                    |   12
-rw-r--r--  web/api/queries/sum/sum.c                          |   14
-rw-r--r--  web/api/queries/sum/sum.h                          |    6
-rw-r--r--  web/api/queries/trimmed_mean/Makefile.am           |    8
-rw-r--r--  web/api/queries/trimmed_mean/README.md             |   56
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.c        |  166
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.h        |   22
-rw-r--r--  web/api/queries/weights.c                          | 1220
-rw-r--r--  web/api/queries/weights.h                          |   33
38 files changed, 3718 insertions, 1233 deletions
diff --git a/web/api/queries/Makefile.am b/web/api/queries/Makefile.am
index 34bfdb85b..7c4c43520 100644
--- a/web/api/queries/Makefile.am
+++ b/web/api/queries/Makefile.am
@@ -5,14 +5,17 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
SUBDIRS = \
average \
+ countif \
des \
incremental_sum \
max \
min \
sum \
median \
+ percentile \
ses \
stddev \
+ trimmed_mean \
$(NULL)
dist_noinst_DATA = \
diff --git a/web/api/queries/average/average.c b/web/api/queries/average/average.c
index 2ed33da50..0719d57fa 100644
--- a/web/api/queries/average/average.c
+++ b/web/api/queries/average/average.c
@@ -6,12 +6,12 @@
// average
struct grouping_average {
- calculated_number sum;
+ NETDATA_DOUBLE sum;
size_t count;
};
-void grouping_create_average(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_average));
+void grouping_create_average(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_average));
}
// resets when switches dimensions
@@ -23,20 +23,20 @@ void grouping_reset_average(RRDR *r) {
}
void grouping_free_average(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_average(RRDR *r, calculated_number value) {
+void grouping_add_average(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
g->sum += value;
g->count++;
}
-calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_average *g = (struct grouping_average *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h
index 23ecfac6f..55c51722c 100644
--- a/web/api/queries/average/average.h
+++ b/web/api/queries/average/average.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_average(RRDR *r);
+extern void grouping_create_average(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_average(RRDR *r);
extern void grouping_free_average(RRDR *r);
-extern void grouping_add_average(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_average(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_AVERAGE_H
diff --git a/web/api/queries/countif/Makefile.am b/web/api/queries/countif/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/web/api/queries/countif/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/countif/README.md b/web/api/queries/countif/README.md
new file mode 100644
index 000000000..200a4c9ed
--- /dev/null
+++ b/web/api/queries/countif/README.md
@@ -0,0 +1,36 @@
+<!--
+title: "CountIf"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/countif/README.md
+-->
+
+# CountIf
+
+> This query is available as `countif`.
+
+CountIf returns the percentage of points in the database that satisfy the condition supplied.
+
+The following conditions are available:
+
+- `!` or `!=` or `<>`, not equal to
+- `=` or `:`, equal to
+- `>`, greater than
+- `<`, less than
+- `>=`, greater than or equal to
+- `<=`, less than or equal to
+
+The target number and the desired condition can be set using the `group_options` query parameter, as a string, like in these examples:
+
+- `!0`, to match any number except zero.
+- `>=-3`, to match any number greater than or equal to -3.
+
+When an invalid condition is given, the web server may deliver an inaccurate response.
+
+## how to use
+
+This query cannot be used in alarms.
+
+`countif` changes the units of charts. The result of the calculation is always from zero to 100, expressing the percentage of database points that matched the condition.
+
+In APIs and badges it can be used like this: `&group=countif&group_options=>10` in the URL.
+
+
diff --git a/web/api/queries/countif/countif.c b/web/api/queries/countif/countif.c
new file mode 100644
index 000000000..369b20be9
--- /dev/null
+++ b/web/api/queries/countif/countif.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "countif.h"
+
+// ----------------------------------------------------------------------------
+// countif
+
+struct grouping_countif {
+ size_t (*comparison)(NETDATA_DOUBLE, NETDATA_DOUBLE);
+ NETDATA_DOUBLE target;
+ size_t count;
+ size_t matched;
+};
+
+static size_t countif_equal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v == target);
+}
+
+static size_t countif_notequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v != target);
+}
+
+static size_t countif_less(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v < target);
+}
+
+static size_t countif_lessequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v <= target);
+}
+
+static size_t countif_greater(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v > target);
+}
+
+static size_t countif_greaterequal(NETDATA_DOUBLE v, NETDATA_DOUBLE target) {
+ return (v >= target);
+}
+
+void grouping_create_countif(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_countif *g = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_countif));
+ r->internal.grouping_data = g;
+
+ if(options && *options) {
+ // skip any leading spaces
+ while(isspace(*options)) options++;
+
+ // find the comparison function
+ switch(*options) {
+ case '!':
+ options++;
+ if(*options != '=' && *options != ':')
+ options--;
+ g->comparison = countif_notequal;
+ break;
+
+ case '>':
+ options++;
+ if(*options == '=' || *options == ':') {
+ g->comparison = countif_greaterequal;
+ }
+ else {
+ options--;
+ g->comparison = countif_greater;
+ }
+ break;
+
+ case '<':
+ options++;
+ if(*options == '>') {
+ g->comparison = countif_notequal;
+ }
+ else if(*options == '=' || *options == ':') {
+ g->comparison = countif_lessequal;
+ }
+ else {
+ options--;
+ g->comparison = countif_less;
+ }
+ break;
+
+ default:
+ case '=':
+ case ':':
+ g->comparison = countif_equal;
+ break;
+ }
+ if(*options) options++;
+
+ // skip everything up to the first digit
+ while(isspace(*options)) options++;
+
+ g->target = str2ndd(options, NULL);
+ }
+ else {
+ g->target = 0.0;
+ g->comparison = countif_equal;
+ }
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_countif(RRDR *r) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+ g->matched = 0;
+ g->count = 0;
+}
+
+void grouping_free_countif(RRDR *r) {
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+ g->matched += g->comparison(value, g->target);
+ g->count++;
+}
+
+NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_countif *g = (struct grouping_countif *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+
+ if(unlikely(!g->count)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else {
+ value = (NETDATA_DOUBLE)g->matched * 100 / (NETDATA_DOUBLE)g->count;
+ }
+
+ g->matched = 0;
+ g->count = 0;
+
+ return value;
+}
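
For readers skimming the patch, the countif flow above is: pick a comparison function from the condition string, count matches in grouping_add_countif(), and report the match percentage in grouping_flush_countif(). Below is a minimal standalone sketch of the same parse/dispatch/count pattern. It is not part of the patch: plain `double` stands in for `NETDATA_DOUBLE`, and `parse_condition` is a hypothetical helper that covers only a subset of the operators.

```c
#include <stdio.h>
#include <stdlib.h>

typedef int (*cmp_fn)(double v, double target);

static int cmp_gt(double v, double t) { return v > t; }
static int cmp_ge(double v, double t) { return v >= t; }
static int cmp_eq(double v, double t) { return v == t; }
static int cmp_ne(double v, double t) { return v != t; }

// parse a condition like ">10", ">=5", "!0" into (function, target)
static cmp_fn parse_condition(const char *s, double *target) {
    cmp_fn fn = cmp_eq;                                // default, like the patch
    if(*s == '>')      { s++; if(*s == '=') { fn = cmp_ge; s++; } else fn = cmp_gt; }
    else if(*s == '!') { s++; if(*s == '=' || *s == ':') s++; fn = cmp_ne; }
    else if(*s == '=' || *s == ':') s++;
    *target = strtod(s, NULL);
    return fn;
}

int main(void) {
    double series[] = { 1, 5, 12, 20, 7 };
    size_t n = sizeof(series) / sizeof(series[0]);

    double target;
    cmp_fn cmp = parse_condition(">10", &target);      // like &group_options=>10

    size_t matched = 0;
    for(size_t i = 0; i < n; i++)
        matched += cmp(series[i], target);

    // like grouping_flush_countif(): percentage of points matching
    printf("%.1f%% of points match\n", (double)matched * 100.0 / (double)n);
    return 0;                                          // prints: 40.0% of points match
}
```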
diff --git a/web/api/queries/countif/countif.h b/web/api/queries/countif/countif.h
new file mode 100644
index 000000000..0c7d2d7d1
--- /dev/null
+++ b/web/api/queries/countif/countif.h
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERY_COUNTIF_H
+#define NETDATA_API_QUERY_COUNTIF_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_create_countif(RRDR *r, const char *options __maybe_unused);
+extern void grouping_reset_countif(RRDR *r);
+extern void grouping_free_countif(RRDR *r);
+extern void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERY_COUNTIF_H
diff --git a/web/api/queries/des/des.c b/web/api/queries/des/des.c
index 8e4ca4bd4..a6c4e4051 100644
--- a/web/api/queries/des/des.c
+++ b/web/api/queries/des/des.c
@@ -8,13 +8,13 @@
// single exponential smoothing
struct grouping_des {
- calculated_number alpha;
- calculated_number alpha_other;
- calculated_number beta;
- calculated_number beta_other;
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE beta;
+ NETDATA_DOUBLE beta_other;
- calculated_number level;
- calculated_number trend;
+ NETDATA_DOUBLE level;
+ NETDATA_DOUBLE trend;
size_t count;
};
@@ -31,22 +31,22 @@ void grouping_init_des(void) {
}
}
-static inline calculated_number window(RRDR *r, struct grouping_des *g) {
+static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_des *g) {
(void)g;
- calculated_number points;
+ NETDATA_DOUBLE points;
if(r->group == 1) {
// provide a running DES
- points = r->internal.points_wanted;
+ points = (NETDATA_DOUBLE)r->internal.points_wanted;
}
else {
// provide a SES with flush points
- points = r->group;
+ points = (NETDATA_DOUBLE)r->group;
}
// https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
// A commonly used value for alpha is 2 / (N + 1)
- return (points > max_window_size) ? max_window_size : points;
+ return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
}
static inline void set_alpha(RRDR *r, struct grouping_des *g) {
@@ -69,8 +69,8 @@ static inline void set_beta(RRDR *r, struct grouping_des *g) {
//info("beta for chart '%s' is " CALCULATED_NUMBER_FORMAT, r->st->name, g->beta);
}
-void grouping_create_des(RRDR *r) {
- struct grouping_des *g = (struct grouping_des *)mallocz(sizeof(struct grouping_des));
+void grouping_create_des(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_des *g = (struct grouping_des *)onewayalloc_mallocz(r->internal.owa, sizeof(struct grouping_des));
set_alpha(r, g);
set_beta(r, g);
g->level = 0.0;
@@ -92,11 +92,11 @@ void grouping_reset_des(RRDR *r) {
}
void grouping_free_des(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_des(RRDR *r, calculated_number value) {
+void grouping_add_des(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
if(likely(g->count > 0)) {
@@ -109,7 +109,7 @@ void grouping_add_des(RRDR *r, calculated_number value) {
}
// for the values, except the first
- calculated_number last_level = g->level;
+ NETDATA_DOUBLE last_level = g->level;
g->level = (g->alpha * value) + (g->alpha_other * (g->level + g->trend));
g->trend = (g->beta * (g->level - last_level)) + (g->beta_other * g->trend);
}
@@ -123,10 +123,10 @@ void grouping_add_des(RRDR *r, calculated_number value) {
//fprintf(stderr, "value: " CALCULATED_NUMBER_FORMAT ", level: " CALCULATED_NUMBER_FORMAT ", trend: " CALCULATED_NUMBER_FORMAT "\n", value, g->level, g->trend);
}
-calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_des *g = (struct grouping_des *)r->internal.grouping_data;
- if(unlikely(!g->count || !calculated_number_isnumber(g->level))) {
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
return 0.0;
}
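
The level/trend update touched above is standard double (Holt) exponential smoothing: the level tracks the smoothed value, the trend tracks its slope. Here is a minimal sketch of the recurrence, assuming fixed `alpha`/`beta` constants rather than the window-derived values from `set_alpha()`/`set_beta()`, plain `double` in place of `NETDATA_DOUBLE`, and one common initialization for the first point.

```c
#include <stdio.h>

int main(void) {
    // assumed smoothing constants; the patch derives them from the
    // window size via set_alpha()/set_beta() instead
    double alpha = 0.4, beta = 0.3;
    double series[] = { 10, 12, 13, 12, 15 };

    // the first value seeds the level; trend starts at zero (one common choice)
    double level = series[0];
    double trend = 0.0;

    for(int i = 1; i < 5; i++) {
        double last_level = level;
        // same recurrence as grouping_add_des() above
        level = alpha * series[i] + (1.0 - alpha) * (level + trend);
        trend = beta  * (level - last_level) + (1.0 - beta) * trend;
    }

    // grouping_flush_des() reports the smoothed level
    printf("level: %f  trend: %f\n", level, trend);
    return 0;
}
```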
diff --git a/web/api/queries/des/des.h b/web/api/queries/des/des.h
index bd361b865..8906d14eb 100644
--- a/web/api/queries/des/des.h
+++ b/web/api/queries/des/des.h
@@ -8,10 +8,10 @@
extern void grouping_init_des(void);
-extern void grouping_create_des(RRDR *r);
+extern void grouping_create_des(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_des(RRDR *r);
extern void grouping_free_des(RRDR *r);
-extern void grouping_add_des(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_des(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_DES_H
diff --git a/web/api/queries/incremental_sum/incremental_sum.c b/web/api/queries/incremental_sum/incremental_sum.c
index 304d9aa74..afca530c3 100644
--- a/web/api/queries/incremental_sum/incremental_sum.c
+++ b/web/api/queries/incremental_sum/incremental_sum.c
@@ -6,13 +6,13 @@
// incremental sum
struct grouping_incremental_sum {
- calculated_number first;
- calculated_number last;
+ NETDATA_DOUBLE first;
+ NETDATA_DOUBLE last;
size_t count;
};
-void grouping_create_incremental_sum(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_incremental_sum));
+void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_incremental_sum));
}
// resets when switches dimensions
@@ -25,11 +25,11 @@ void grouping_reset_incremental_sum(RRDR *r) {
}
void grouping_free_incremental_sum(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_incremental_sum(RRDR *r, calculated_number value) {
+void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
if(unlikely(!g->count)) {
@@ -42,10 +42,10 @@ void grouping_add_incremental_sum(RRDR *r, calculated_number value) {
}
}
-calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_incremental_sum *g = (struct grouping_incremental_sum *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/web/api/queries/incremental_sum/incremental_sum.h
index 5b55ad3c8..6d908cef6 100644
--- a/web/api/queries/incremental_sum/incremental_sum.h
+++ b/web/api/queries/incremental_sum/incremental_sum.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_incremental_sum(RRDR *r);
+extern void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_incremental_sum(RRDR *r);
extern void grouping_free_incremental_sum(RRDR *r);
-extern void grouping_add_incremental_sum(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H
diff --git a/web/api/queries/max/max.c b/web/api/queries/max/max.c
index b6e723314..73cf9fa66 100644
--- a/web/api/queries/max/max.c
+++ b/web/api/queries/max/max.c
@@ -6,12 +6,12 @@
// max
struct grouping_max {
- calculated_number max;
+ NETDATA_DOUBLE max;
size_t count;
};
-void grouping_create_max(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_max));
+void grouping_create_max(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_max));
}
// resets when switches dimensions
@@ -23,23 +23,23 @@ void grouping_reset_max(RRDR *r) {
}
void grouping_free_max(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_max(RRDR *r, calculated_number value) {
+void grouping_add_max(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
- if(!g->count || calculated_number_fabs(value) > calculated_number_fabs(g->max)) {
+ if(!g->count || fabsndd(value) > fabsndd(g->max)) {
g->max = value;
g->count++;
}
}
-calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_max *g = (struct grouping_max *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/max/max.h b/web/api/queries/max/max.h
index 7b606ce34..28913686b 100644
--- a/web/api/queries/max/max.h
+++ b/web/api/queries/max/max.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_max(RRDR *r);
+extern void grouping_create_max(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_max(RRDR *r);
extern void grouping_free_max(RRDR *r);
-extern void grouping_add_max(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_max(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_MAX_H
diff --git a/web/api/queries/median/README.md b/web/api/queries/median/README.md
index bb7d4c66b..5600284c2 100644
--- a/web/api/queries/median/README.md
+++ b/web/api/queries/median/README.md
@@ -13,6 +13,20 @@ The median is the value separating the higher half from the lower half of a data
`median` is not an accurate average. However, it eliminates all spikes, by sorting
all the values in a period, and selecting the value in the middle of the sorted array.
+Netdata also supports `trimmed-median`, which trims a percentage of the smallest and largest values prior to finding the
+median. The following `trimmed-median` functions are defined:
+
+- `trimmed-median1`
+- `trimmed-median2`
+- `trimmed-median3`
+- `trimmed-median5`
+- `trimmed-median10`
+- `trimmed-median15`
+- `trimmed-median20`
+- `trimmed-median25`
+
+The function `trimmed-median` is an alias for `trimmed-median5`.
+
## how to use
Use it in alarms like this:
@@ -27,7 +41,8 @@ lookup: median -1m unaligned of my_dimension
`median` does not change the units. For example, if the chart's units are `requests/sec`, the result
will again be expressed in the same units.
-It can also be used in APIs and badges as `&group=median` in the URL.
+It can also be used in APIs and badges as `&group=median` in the URL. Additionally, a percentage may be given with
+`&group_options=` to trim that percentage of the smallest and largest values before finding the median.
## Examples
diff --git a/web/api/queries/median/median.c b/web/api/queries/median/median.c
index bffcee12f..40fd4ec3a 100644
--- a/web/api/queries/median/median.c
+++ b/web/api/queries/median/median.c
@@ -2,27 +2,65 @@
#include "median.h"
-
// ----------------------------------------------------------------------------
// median
struct grouping_median {
size_t series_size;
size_t next_pos;
+ NETDATA_DOUBLE percent;
- LONG_DOUBLE series[];
+ NETDATA_DOUBLE *series;
};
-void grouping_create_median(RRDR *r) {
+void grouping_create_median_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
long entries = r->group;
- if(entries < 0) entries = 0;
+ if(entries < 10) entries = 10;
- struct grouping_median *g = (struct grouping_median *)callocz(1, sizeof(struct grouping_median) + entries * sizeof(LONG_DOUBLE));
+ struct grouping_median *g = (struct grouping_median *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_median));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
g->series_size = (size_t)entries;
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
+ g->percent = g->percent / 100.0;
r->internal.grouping_data = g;
}
+void grouping_create_median(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 0.0);
+}
+void grouping_create_trimmed_median1(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 1.0);
+}
+void grouping_create_trimmed_median2(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 2.0);
+}
+void grouping_create_trimmed_median3(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 3.0);
+}
+void grouping_create_trimmed_median5(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 5.0);
+}
+void grouping_create_trimmed_median10(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 10.0);
+}
+void grouping_create_trimmed_median15(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 15.0);
+}
+void grouping_create_trimmed_median20(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 20.0);
+}
+void grouping_create_trimmed_median25(RRDR *r, const char *options) {
+ grouping_create_median_internal(r, options, 25.0);
+}
+
// resets when switches dimensions
// so, clear everything to restart
void grouping_reset_median(RRDR *r) {
@@ -31,47 +69,72 @@ void grouping_reset_median(RRDR *r) {
}
void grouping_free_median(RRDR *r) {
- freez(r->internal.grouping_data);
+ struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_median(RRDR *r, calculated_number value) {
+void grouping_add_median(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
if(unlikely(g->next_pos >= g->series_size)) {
- error("INTERNAL ERROR: median buffer overflow on chart '%s' - next_pos = %zu, series_size = %zu, r->group = %ld.", r->st->name, g->next_pos, g->series_size, r->group);
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
}
- else
- g->series[g->next_pos++] = (LONG_DOUBLE)value;
+
+ g->series[g->next_pos++] = value;
}
-calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_median *g = (struct grouping_median *)r->internal.grouping_data;
- calculated_number value;
+ size_t available_slots = g->next_pos;
+ NETDATA_DOUBLE value;
- if(unlikely(!g->next_pos)) {
+ if(unlikely(!available_slots)) {
value = 0.0;
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
else {
- if(g->next_pos > 1) {
- sort_series(g->series, g->next_pos);
- value = (calculated_number)median_on_sorted_series(g->series, g->next_pos);
- }
- else
- value = (calculated_number)g->series[0];
+ sort_series(g->series, available_slots);
+
+ size_t start_slot = 0;
+ size_t end_slot = available_slots - 1;
+
+ if(g->percent > 0.0) {
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+ NETDATA_DOUBLE delta = (max - min) * g->percent;
+
+ NETDATA_DOUBLE wanted_min = min + delta;
+ NETDATA_DOUBLE wanted_max = max - delta;
+
+ for (start_slot = 0; start_slot < available_slots; start_slot++)
+ if (g->series[start_slot] >= wanted_min) break;
- if(!calculated_number_isnumber(value)) {
- value = 0.0;
- *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ for (end_slot = available_slots - 1; end_slot > start_slot; end_slot--)
+ if (g->series[end_slot] <= wanted_max) break;
}
- //log_series_to_stderr(g->series, g->next_pos, value, "median");
+ if(start_slot == end_slot)
+ value = g->series[start_slot];
+ else
+ value = median_on_sorted_series(&g->series[start_slot], end_slot - start_slot + 1);
}
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "median");
+
g->next_pos = 0;
return value;
}
-
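
Note that the trimming added to grouping_flush_median() above is value-based, not count-based: it drops points outside `percent` of the min-max value range before taking the median of what remains. The following standalone sketch shows the same idea; it is hypothetical code, not part of the patch, using plain `double` and a naive median of the kept window.

```c
#include <stdio.h>
#include <stdlib.h>

static int cmp_double(const void *a, const void *b) {
    double d = *(const double *)a - *(const double *)b;
    return (d > 0) - (d < 0);
}

static double trimmed_median(double *s, size_t n, double percent) {
    qsort(s, n, sizeof(double), cmp_double);

    // drop values outside `percent` of the min-max range, as the patch does
    double delta      = (s[n - 1] - s[0]) * percent;
    double wanted_min = s[0] + delta;
    double wanted_max = s[n - 1] - delta;

    size_t start = 0, end = n - 1;
    while(start < end && s[start] < wanted_min) start++;
    while(end > start && s[end] > wanted_max) end--;

    // naive median of the kept window
    size_t count = end - start + 1;
    return (count % 2) ? s[start + count / 2]
                       : (s[start + count / 2 - 1] + s[start + count / 2]) / 2.0;
}

int main(void) {
    double s[] = { 1, 10, 11, 12, 13, 100 };   // 1 and 100 are outliers
    printf("trimmed median: %f\n", trimmed_median(s, 6, 0.10));
    return 0;                                  // prints: trimmed median: 12.000000
}
```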
diff --git a/web/api/queries/median/median.h b/web/api/queries/median/median.h
index 28d52b31e..dd1b3de61 100644
--- a/web/api/queries/median/median.h
+++ b/web/api/queries/median/median.h
@@ -6,10 +6,18 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_median(RRDR *r);
+extern void grouping_create_median(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median1(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median2(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median3(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median5(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median10(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median15(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median20(RRDR *r, const char *options);
+extern void grouping_create_trimmed_median25(RRDR *r, const char *options);
extern void grouping_reset_median(RRDR *r);
extern void grouping_free_median(RRDR *r);
-extern void grouping_add_median(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_median(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_MEDIAN_H
diff --git a/web/api/queries/min/min.c b/web/api/queries/min/min.c
index 497bae04d..1752e9e0c 100644
--- a/web/api/queries/min/min.c
+++ b/web/api/queries/min/min.c
@@ -6,12 +6,12 @@
// min
struct grouping_min {
- calculated_number min;
+ NETDATA_DOUBLE min;
size_t count;
};
-void grouping_create_min(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_min));
+void grouping_create_min(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_min));
}
// resets when switches dimensions
@@ -23,23 +23,23 @@ void grouping_reset_min(RRDR *r) {
}
void grouping_free_min(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_min(RRDR *r, calculated_number value) {
+void grouping_add_min(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
- if(!g->count || calculated_number_fabs(value) < calculated_number_fabs(g->min)) {
+ if(!g->count || fabsndd(value) < fabsndd(g->min)) {
g->min = value;
g->count++;
}
}
-calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_min *g = (struct grouping_min *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/min/min.h b/web/api/queries/min/min.h
index 9207c74f7..b8627f667 100644
--- a/web/api/queries/min/min.h
+++ b/web/api/queries/min/min.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_min(RRDR *r);
+extern void grouping_create_min(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_min(RRDR *r);
extern void grouping_free_min(RRDR *r);
-extern void grouping_add_min(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_min(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_MIN_H
diff --git a/web/api/queries/percentile/Makefile.am b/web/api/queries/percentile/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/web/api/queries/percentile/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/percentile/README.md b/web/api/queries/percentile/README.md
new file mode 100644
index 000000000..70afc7420
--- /dev/null
+++ b/web/api/queries/percentile/README.md
@@ -0,0 +1,58 @@
+<!--
+title: "Percentile"
+description: "Use percentile in API queries and health entities to find the 'percentile' value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/percentile/README.md
+-->
+
+# Percentile
+
+The percentile is the average value of a series, using only the smallest N percent of the values
+(of a population or a probability distribution).
+
+Netdata applies linear interpolation on the last point, if the requested percentile does not result in a round number of
+points.
+
+The following percentile aliases are defined:
+
+- `percentile25`
+- `percentile50`
+- `percentile75`
+- `percentile80`
+- `percentile90`
+- `percentile95`
+- `percentile97`
+- `percentile98`
+- `percentile99`
+
+The default `percentile` is an alias for `percentile95`.
+Any percentile may be requested using the `group_options` query parameter.
+
+## how to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: percentile95 -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`percentile` does not change the units. For example, if the chart's units are `requests/sec`, the result
+will again be expressed in the same units.
+
+It can also be used in APIs and badges as `&group=percentile` in the URL and the additional parameter `group_options`
+may be used to request any percentile (e.g. `&group=percentile&group_options=96`).
+
+## Examples
+
+Examining last 1 minute `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=percentile95&after=-60&label=percentile95&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Percentile>.
diff --git a/web/api/queries/percentile/percentile.c b/web/api/queries/percentile/percentile.c
new file mode 100644
index 000000000..88f8600dd
--- /dev/null
+++ b/web/api/queries/percentile/percentile.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "percentile.h"
+
+// ----------------------------------------------------------------------------
+// percentile
+
+struct grouping_percentile {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static void grouping_create_percentile_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->group;
+ if(entries < 10) entries = 10;
+
+ struct grouping_percentile *g = (struct grouping_percentile *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_percentile));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 100.0) g->percent = 100.0;
+ }
+
+ g->percent = g->percent / 100.0;
+ r->internal.grouping_data = g;
+}
+
+void grouping_create_percentile25(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 25.0);
+}
+void grouping_create_percentile50(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 50.0);
+}
+void grouping_create_percentile75(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 75.0);
+}
+void grouping_create_percentile80(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 80.0);
+}
+void grouping_create_percentile90(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 90.0);
+}
+void grouping_create_percentile95(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 95.0);
+}
+void grouping_create_percentile97(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 97.0);
+}
+void grouping_create_percentile98(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 98.0);
+}
+void grouping_create_percentile99(RRDR *r, const char *options) {
+ grouping_create_percentile_internal(r, options, 99.0);
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_percentile(RRDR *r) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_percentile(RRDR *r) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_percentile *g = (struct grouping_percentile *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = 0;
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1;
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "percentile");
+
+ g->next_pos = 0;
+
+ return value;
+}
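
The interpolation branch above is easiest to see with numbers: for 7 points at `percent = 0.95`, `slots_to_use = floor(7 * 0.95) = 6`, which covers only 6/7 (about 0.857) of the requested mass, so the residual 0.95 - 6/7 becomes a fractional weight on the next slot, with its complement on the last fully-used slot. Below is a hypothetical standalone sketch of the positive-values path only (plain `double`, no RRDR plumbing).

```c
#include <stdio.h>
#include <stdlib.h>

static int cmp_double(const void *a, const void *b) {
    double d = *(const double *)a - *(const double *)b;
    return (d > 0) - (d < 0);
}

int main(void) {
    double s[] = { 1, 2, 3, 4, 5, 6, 100 };       // 7 points, one outlier
    size_t n = sizeof(s) / sizeof(s[0]);
    double percent = 0.95;                        // i.e. percentile95

    qsort(s, n, sizeof(double), cmp_double);

    size_t slots = (size_t)((double)n * percent); // floor(7 * 0.95) = 6
    double used  = (double)slots / (double)n;     // 6/7, about 0.857
    double delta = percent - used;                // 0.95 - 6/7, about 0.093

    double sum = 0.0;
    for(size_t i = 0; i < slots; i++)
        sum += s[i];                              // 1+2+3+4+5+6 = 21

    size_t counted = slots;
    if(delta > 0.0) {
        // one extra, fractionally-weighted slot: ~65% of s[6],
        // blended with ~35% of the last fully-used slot s[5]
        double w_next = delta / (1.0 / (double)n);
        double w_last = 1.0 - w_next;
        sum += s[slots] * w_next + s[slots - 1] * w_last;
        counted++;
    }

    printf("percentile95 = %f\n", sum / (double)counted);
    return 0;                                     // prints: percentile95 = 12.585714
}
```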
diff --git a/web/api/queries/percentile/percentile.h b/web/api/queries/percentile/percentile.h
new file mode 100644
index 000000000..709717ebd
--- /dev/null
+++ b/web/api/queries/percentile/percentile.h
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_PERCENTILE_H
+#define NETDATA_API_QUERIES_PERCENTILE_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_create_percentile25(RRDR *r, const char *options);
+extern void grouping_create_percentile50(RRDR *r, const char *options);
+extern void grouping_create_percentile75(RRDR *r, const char *options);
+extern void grouping_create_percentile80(RRDR *r, const char *options);
+extern void grouping_create_percentile90(RRDR *r, const char *options);
+extern void grouping_create_percentile95(RRDR *r, const char *options);
+extern void grouping_create_percentile97(RRDR *r, const char *options);
+extern void grouping_create_percentile98(RRDR *r, const char *options);
+extern void grouping_create_percentile99(RRDR *r, const char *options);
+extern void grouping_reset_percentile(RRDR *r);
+extern void grouping_free_percentile(RRDR *r);
+extern void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_PERCENTILE_H
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
index 5c6c70411..d776f6d11 100644
--- a/web/api/queries/query.c
+++ b/web/api/queries/query.c
@@ -3,9 +3,9 @@
#include "query.h"
#include "web/api/formatters/rrd2json.h"
#include "rrdr.h"
-#include "database/ram/rrddim_mem.h"
#include "average/average.h"
+#include "countif/countif.h"
#include "incremental_sum/incremental_sum.h"
#include "max/max.h"
#include "median/median.h"
@@ -14,6 +14,8 @@
#include "stddev/stddev.h"
#include "ses/ses.h"
#include "des/des.h"
+#include "percentile/percentile.h"
+#include "trimmed_mean/trimmed_mean.h"
// ----------------------------------------------------------------------------
@@ -28,7 +30,7 @@ static struct {
// Allocate all required structures for a query.
// This is called once for each netdata query.
- void (*create)(struct rrdresult *r);
+ void (*create)(struct rrdresult *r, const char *options);
// Cleanup collected values, but don't destroy the structures.
// This is called when the query engine switches dimensions,
@@ -40,7 +42,7 @@ static struct {
// Add a single value into the calculation.
// The module may decide to cache it, or use it in the fly.
- void (*add)(struct rrdresult *r, calculated_number value);
+ void (*add)(struct rrdresult *r, NETDATA_DOUBLE value);
// Generate a single result for the values added so far.
// More values and points may be requested later.
@@ -48,7 +50,9 @@ static struct {
// when flushing it (so for a few modules it may be better to
// continue after a flush as if nothing changed, for others a
// cleanup of the internal structures may be required).
- calculated_number (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ NETDATA_DOUBLE (*flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+ TIER_QUERY_FETCH tier_query_fetch;
} api_v1_data_groups[] = {
{.name = "average",
.hash = 0,
@@ -58,7 +62,8 @@ static struct {
.reset = grouping_reset_average,
.free = grouping_free_average,
.add = grouping_add_average,
- .flush = grouping_flush_average
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "mean", // alias on 'average'
.hash = 0,
@@ -68,7 +73,107 @@ static struct {
.reset = grouping_reset_average,
.free = grouping_free_average,
.add = grouping_add_average,
- .flush = grouping_flush_average
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean1",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN1,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean1,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean2",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN2,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean2,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean3",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN3,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean3,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean5",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean5,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean10",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN10,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean10,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean15",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN15,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean15,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean20",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN20,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean20,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean25",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN25,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean25,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-mean",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_mean5,
+ .reset = grouping_reset_trimmed_mean,
+ .free = grouping_free_trimmed_mean,
+ .add = grouping_add_trimmed_mean,
+ .flush = grouping_flush_trimmed_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "incremental_sum",
.hash = 0,
@@ -78,7 +183,8 @@ static struct {
.reset = grouping_reset_incremental_sum,
.free = grouping_free_incremental_sum,
.add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum
+ .flush = grouping_flush_incremental_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "incremental-sum",
.hash = 0,
@@ -88,7 +194,8 @@ static struct {
.reset = grouping_reset_incremental_sum,
.free = grouping_free_incremental_sum,
.add = grouping_add_incremental_sum,
- .flush = grouping_flush_incremental_sum
+ .flush = grouping_flush_incremental_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "median",
.hash = 0,
@@ -98,7 +205,217 @@ static struct {
.reset = grouping_reset_median,
.free = grouping_free_median,
.add = grouping_add_median,
- .flush = grouping_flush_median
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median1",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN1,
+ .init = NULL,
+ .create= grouping_create_trimmed_median1,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median2",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN2,
+ .init = NULL,
+ .create= grouping_create_trimmed_median2,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median3",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN3,
+ .init = NULL,
+ .create= grouping_create_trimmed_median3,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median5",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_median5,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median10",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN10,
+ .init = NULL,
+ .create= grouping_create_trimmed_median10,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median15",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN15,
+ .init = NULL,
+ .create= grouping_create_trimmed_median15,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median20",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN20,
+ .init = NULL,
+ .create= grouping_create_trimmed_median20,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median25",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN25,
+ .init = NULL,
+ .create= grouping_create_trimmed_median25,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "trimmed-median",
+ .hash = 0,
+ .value = RRDR_GROUPING_TRIMMED_MEDIAN5,
+ .init = NULL,
+ .create= grouping_create_trimmed_median5,
+ .reset = grouping_reset_median,
+ .free = grouping_free_median,
+ .add = grouping_add_median,
+ .flush = grouping_flush_median,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile25",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE25,
+ .init = NULL,
+ .create= grouping_create_percentile25,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile50",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE50,
+ .init = NULL,
+ .create= grouping_create_percentile50,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile75",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE75,
+ .init = NULL,
+ .create= grouping_create_percentile75,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile80",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE80,
+ .init = NULL,
+ .create= grouping_create_percentile80,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile90",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE90,
+ .init = NULL,
+ .create= grouping_create_percentile90,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile95",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE95,
+ .init = NULL,
+ .create= grouping_create_percentile95,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile97",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE97,
+ .init = NULL,
+ .create= grouping_create_percentile97,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile98",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE98,
+ .init = NULL,
+ .create= grouping_create_percentile98,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile99",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE99,
+ .init = NULL,
+ .create= grouping_create_percentile99,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+ {.name = "percentile",
+ .hash = 0,
+ .value = RRDR_GROUPING_PERCENTILE95,
+ .init = NULL,
+ .create= grouping_create_percentile95,
+ .reset = grouping_reset_percentile,
+ .free = grouping_free_percentile,
+ .add = grouping_add_percentile,
+ .flush = grouping_flush_percentile,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "min",
.hash = 0,
@@ -108,7 +425,8 @@ static struct {
.reset = grouping_reset_min,
.free = grouping_free_min,
.add = grouping_add_min,
- .flush = grouping_flush_min
+ .flush = grouping_flush_min,
+ .tier_query_fetch = TIER_QUERY_FETCH_MIN
},
{.name = "max",
.hash = 0,
@@ -118,7 +436,8 @@ static struct {
.reset = grouping_reset_max,
.free = grouping_free_max,
.add = grouping_add_max,
- .flush = grouping_flush_max
+ .flush = grouping_flush_max,
+ .tier_query_fetch = TIER_QUERY_FETCH_MAX
},
{.name = "sum",
.hash = 0,
@@ -128,7 +447,8 @@ static struct {
.reset = grouping_reset_sum,
.free = grouping_free_sum,
.add = grouping_add_sum,
- .flush = grouping_flush_sum
+ .flush = grouping_flush_sum,
+ .tier_query_fetch = TIER_QUERY_FETCH_SUM
},
// standard deviation
@@ -140,7 +460,8 @@ static struct {
.reset = grouping_reset_stddev,
.free = grouping_free_stddev,
.add = grouping_add_stddev,
- .flush = grouping_flush_stddev
+ .flush = grouping_flush_stddev,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "cv", // coefficient variation is calculated by stddev
.hash = 0,
@@ -150,7 +471,8 @@ static struct {
.reset = grouping_reset_stddev, // not an error, stddev calculates this too
.free = grouping_free_stddev, // not an error, stddev calculates this too
.add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation
+ .flush = grouping_flush_coefficient_of_variation,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "rsd", // alias of 'cv'
.hash = 0,
@@ -160,7 +482,8 @@ static struct {
.reset = grouping_reset_stddev, // not an error, stddev calculates this too
.free = grouping_free_stddev, // not an error, stddev calculates this too
.add = grouping_add_stddev, // not an error, stddev calculates this too
- .flush = grouping_flush_coefficient_of_variation
+ .flush = grouping_flush_coefficient_of_variation,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
/*
@@ -172,7 +495,8 @@ static struct {
.reset = grouping_reset_stddev,
.free = grouping_free_stddev,
.add = grouping_add_stddev,
- .flush = grouping_flush_mean
+ .flush = grouping_flush_mean,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
*/
@@ -185,7 +509,8 @@ static struct {
.reset = grouping_reset_stddev,
.free = grouping_free_stddev,
.add = grouping_add_stddev,
- .flush = grouping_flush_variance
+ .flush = grouping_flush_variance,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
*/
@@ -193,44 +518,60 @@ static struct {
{.name = "ses",
.hash = 0,
.value = RRDR_GROUPING_SES,
- .init = grouping_init_ses,
+ .init = grouping_init_ses,
.create= grouping_create_ses,
.reset = grouping_reset_ses,
.free = grouping_free_ses,
.add = grouping_add_ses,
- .flush = grouping_flush_ses
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "ema", // alias for 'ses'
.hash = 0,
.value = RRDR_GROUPING_SES,
- .init = NULL,
+ .init = NULL,
.create= grouping_create_ses,
.reset = grouping_reset_ses,
.free = grouping_free_ses,
.add = grouping_add_ses,
- .flush = grouping_flush_ses
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
{.name = "ewma", // alias for ses
.hash = 0,
.value = RRDR_GROUPING_SES,
- .init = NULL,
+ .init = NULL,
.create= grouping_create_ses,
.reset = grouping_reset_ses,
.free = grouping_free_ses,
.add = grouping_add_ses,
- .flush = grouping_flush_ses
+ .flush = grouping_flush_ses,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
// double exponential smoothing
{.name = "des",
.hash = 0,
.value = RRDR_GROUPING_DES,
- .init = grouping_init_des,
+ .init = grouping_init_des,
.create= grouping_create_des,
.reset = grouping_reset_des,
.free = grouping_free_des,
.add = grouping_add_des,
- .flush = grouping_flush_des
+ .flush = grouping_flush_des,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
+ },
+
+ {.name = "countif",
+ .hash = 0,
+ .value = RRDR_GROUPING_COUNTIF,
+ .init = NULL,
+ .create= grouping_create_countif,
+ .reset = grouping_reset_countif,
+ .free = grouping_free_countif,
+ .add = grouping_add_countif,
+ .flush = grouping_flush_countif,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
},
// terminator
@@ -242,7 +583,8 @@ static struct {
.reset = grouping_reset_average,
.free = grouping_free_average,
.add = grouping_add_average,
- .flush = grouping_flush_average
+ .flush = grouping_flush_average,
+ .tier_query_fetch = TIER_QUERY_FETCH_AVERAGE
}
};
@@ -280,6 +622,41 @@ RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPI
return def;
}
+const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group) {
+ int i;
+
+ for(i = 0; api_v1_data_groups[i].name ; i++)
+ if(unlikely(group == api_v1_data_groups[i].value))
+ return api_v1_data_groups[i].name;
+
+ return "unknown";
+}
+
+static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
+ int i, found = 0;
+ for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
+ if(api_v1_data_groups[i].value == group_method) {
+ r->internal.grouping_create = api_v1_data_groups[i].create;
+ r->internal.grouping_reset = api_v1_data_groups[i].reset;
+ r->internal.grouping_free = api_v1_data_groups[i].free;
+ r->internal.grouping_add = api_v1_data_groups[i].add;
+ r->internal.grouping_flush = api_v1_data_groups[i].flush;
+ r->internal.tier_query_fetch = api_v1_data_groups[i].tier_query_fetch;
+ found = 1;
+ }
+ }
+ if(!found) {
+ errno = 0;
+ internal_error(true, "QUERY: grouping method %u not found. Using 'average'", (unsigned int)group_method);
+ r->internal.grouping_create = grouping_create_average;
+ r->internal.grouping_reset = grouping_reset_average;
+ r->internal.grouping_free = grouping_free_average;
+ r->internal.grouping_add = grouping_add_average;
+ r->internal.grouping_flush = grouping_flush_average;
+ r->internal.tier_query_fetch = TIER_QUERY_FETCH_AVERAGE;
+ }
+}
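+
+// Illustrative call sequence (a sketch of how the resolved callbacks are
+// expected to be used; not authoritative):
+//
+//     r->internal.grouping_create(r, group_options);         // once per query
+//     r->internal.grouping_reset(r);                         // once per dimension
+//     r->internal.grouping_add(r, value);                    // once per db point
+//     value = r->internal.grouping_flush(r, &value_options); // once per output point
+//     r->internal.grouping_free(r);                          // once, when done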
+
// ----------------------------------------------------------------------------
static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims,
@@ -335,7 +712,7 @@ static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options,
// check if all dimensions are hidden
if(unlikely(!dims_not_hidden_not_zero && dims_selected)) {
- // there are a few selected dimensions
+ // there are a few selected dimensions,
// but they are all zero
// enable the selected ones
// to avoid returning an empty chart
@@ -352,22 +729,20 @@ static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long
return &r->o[ rrdr_line * r->d ];
}
-static inline calculated_number *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
+static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rrdr_line) {
return &r->v[ rrdr_line * r->d ];
}
static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
rrdr_line++;
- #ifdef NETDATA_INTERNAL_CHECKS
-
- if(unlikely(rrdr_line >= r->n))
- error("INTERNAL ERROR: requested to step above RRDR size for chart '%s'", r->st->name);
+ internal_error(rrdr_line >= r->n,
+ "QUERY: requested to step above RRDR size for chart '%s'",
+ r->st->name);
- if(unlikely(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t))
- error("INTERNAL ERROR: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'", (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);
-
- #endif
+ internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
+ "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'",
+ (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);
// save the time
r->t[rrdr_line] = t;
@@ -381,337 +756,737 @@ static inline void rrdr_done(RRDR *r, long rrdr_line) {
// ----------------------------------------------------------------------------
-// fill RRDR for a single dimension
+// tier management
-static inline void do_dimension_variablestep(
- RRDR *r
- , long points_wanted
- , RRDDIM *rd
- , long dim_id_in_rrdr
- , time_t after_wanted
- , time_t before_wanted
- , uint32_t options
-){
-// RRDSET *st = r->st;
+static int rrddim_find_best_tier_for_timeframe(RRDDIM *rd, time_t after_wanted, time_t before_wanted, long points_wanted) {
+ if(unlikely(storage_tiers < 2))
+ return 0;
+
+ if(unlikely(after_wanted == before_wanted || points_wanted <= 0 || !rd || !rd->rrdset)) {
- time_t
- now = after_wanted,
- dt = r->update_every,
- max_date = 0,
- min_date = 0;
+ if(!rd)
+ internal_error(true, "QUERY: NULL dimension - invalid params to tier calculation");
+ else
+ internal_error(true, "QUERY: chart '%s' dimension '%s' invalid params to tier calculation",
+ (rd->rrdset)?rd->rrdset->name:"unknown", rd->name);
- long
-// group_size = r->group,
- points_added = 0,
- values_in_group = 0,
- values_in_group_non_zero = 0,
- rrdr_line = -1;
+ return 0;
+ }
- RRDR_VALUE_FLAGS
- group_value_flags = RRDR_VALUE_NOTHING;
+ //BUFFER *wb = buffer_create(1000);
+ //buffer_sprintf(wb, "Best tier for chart '%s', dim '%s', from %ld to %ld (dur %ld, every %d), points %ld",
+ // rd->rrdset->name, rd->name, after_wanted, before_wanted, before_wanted - after_wanted, rd->update_every, points_wanted);
- struct rrddim_query_handle handle;
+ long weight[storage_tiers];
+
+ for(int tier = 0; tier < storage_tiers ; tier++) {
+ if(unlikely(!rd->tiers[tier])) {
+ internal_error(true, "QUERY: tier %d of chart '%s' dimension '%s' not initialized",
+ tier, rd->rrdset->name, rd->name);
+ // buffer_free(wb);
+ return 0;
+ }
+
+ time_t first_t = rd->tiers[tier]->query_ops.oldest_time(rd->tiers[tier]->db_metric_handle);
+ time_t last_t = rd->tiers[tier]->query_ops.latest_time(rd->tiers[tier]->db_metric_handle);
+
+ time_t common_after = MAX(first_t, after_wanted);
+ time_t common_before = MIN(last_t, before_wanted);
+
+ long time_coverage = (common_before - common_after) * 1000 / (before_wanted - after_wanted);
+ if(time_coverage < 0) time_coverage = 0;
+
+ int update_every = (int)rd->tiers[tier]->tier_grouping * (int)rd->update_every;
+ if(unlikely(update_every == 0)) {
+ internal_error(true, "QUERY: update_every of tier %d for chart '%s' dimension '%s' is zero. tg = %d, ue = %d",
+ tier, rd->rrdset->name, rd->name, rd->tiers[tier]->tier_grouping, rd->update_every);
+ // buffer_free(wb);
+ return 0;
+ }
- calculated_number min = r->min, max = r->max;
- size_t db_points_read = 0;
- time_t db_now = now;
- storage_number n_curr, n_prev = SN_EMPTY_SLOT;
- calculated_number value;
+ long points_available = (before_wanted - after_wanted) / update_every;
+ long points_delta = points_available - points_wanted;
+ long points_coverage = (points_delta < 0) ? points_available * 1000 / points_wanted: 1000;
- for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
- // make sure we return data in the proper time range
- if (unlikely(now > before_wanted)) {
+ if(points_available <= 0)
+ weight[tier] = -LONG_MAX;
+ else
+ weight[tier] = points_coverage;
+
+ // buffer_sprintf(wb, ": tier %d, first %ld, last %ld (dur %ld, tg %d, every %d), points %ld, tcoverage %ld, pcoverage %ld, weight %ld",
+ // tier, first_t, last_t, last_t - first_t, rd->tiers[tier]->tier_grouping, update_every,
+ // points_available, time_coverage, points_coverage, weight[tier]);
+ }
+
+ int best_tier = 0;
+ for(int tier = 1; tier < storage_tiers ; tier++) {
+ if(weight[tier] >= weight[best_tier])
+ best_tier = tier;
+ }
+
+ if(weight[best_tier] == -LONG_MAX)
+ best_tier = 0;
+
+ //buffer_sprintf(wb, ": final best tier %d", best_tier);
+ //internal_error(true, "%s", buffer_tostring(wb));
+ //buffer_free(wb);
+
+ return best_tier;
+}
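+
+// Worked example (illustrative): querying 500 points over a 3600s window,
+// a tier with tier_grouping=1 and update_every=1 offers 3600 points, so
+// its weight is the cap of 1000, while a tier with tier_grouping=60 offers
+// only 60 points and weighs 60 * 1000 / 500 = 120. On equal weights the
+// scan above prefers the higher (coarser) tier, since it compares with >=.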
+
+static int rrdset_find_natural_update_every_for_timeframe(RRDSET *st, time_t after_wanted, time_t before_wanted, long points_wanted, RRDR_OPTIONS options, int tier) {
+ int ret = st->update_every;
+
+ if(unlikely(!st->dimensions))
+ return ret;
+
+ rrdset_rdlock(st);
+ int best_tier;
+
+ if(options & RRDR_OPTION_SELECTED_TIER && tier >= 0 && tier < storage_tiers)
+ best_tier = tier;
+ else
+ best_tier = rrddim_find_best_tier_for_timeframe(st->dimensions, after_wanted, before_wanted, points_wanted);
+
+ if(!st->dimensions->tiers[best_tier]) {
+ internal_error(
+ true,
+ "QUERY: tier %d on chart '%s', is not initialized", best_tier, st->name);
+ }
+ else {
+ ret = (int)st->dimensions->tiers[best_tier]->tier_grouping * (int)st->update_every;
+ if(unlikely(!ret)) {
+ internal_error(
+ true,
+ "QUERY: update_every calculated to be zero on chart '%s', tier_grouping %d, update_every %d",
+ st->name, st->dimensions->tiers[best_tier]->tier_grouping, st->update_every);
+
+ ret = st->update_every;
+ }
+ }
+
+ rrdset_unlock(st);
+
+ return ret;
+}
+
+// ----------------------------------------------------------------------------
+// query ops
+
+typedef struct query_point {
+ time_t end_time;
+ time_t start_time;
+ NETDATA_DOUBLE value;
+ NETDATA_DOUBLE anomaly;
+ SN_FLAGS flags;
#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
+ size_t id;
#endif
- break;
- }
- if (unlikely(now < after_wanted)) {
+} QUERY_POINT;
+
+QUERY_POINT QUERY_POINT_EMPTY = {
+ .end_time = 0,
+ .start_time = 0,
+ .value = NAN,
+ .anomaly = 0,
+ .flags = SN_FLAG_NONE,
#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
+ .id = 0,
#endif
- continue;
- }
+};
- while (now >= db_now && (!rd->state->query_ops.is_finished(&handle) ||
- does_storage_number_exist(n_prev))) {
- value = NAN;
- if (does_storage_number_exist(n_prev)) {
- // use the previously read database value
- n_curr = n_prev;
- } else {
- // read the value from the database
- n_curr = rd->state->query_ops.next_metric(&handle, &db_now);
- }
- n_prev = SN_EMPTY_SLOT;
- // db_now has a different value than above
- if (likely(now >= db_now)) {
- if (likely(does_storage_number_exist(n_curr))) {
- if (options & RRDR_OPTION_ANOMALY_BIT)
- value = (n_curr & SN_ANOMALY_BIT) ? 0.0 : 100.0;
- else
- value = unpack_storage_number(n_curr);
-
- if (likely(value != 0.0))
- values_in_group_non_zero++;
-
- if (unlikely(did_storage_number_reset(n_curr)))
- group_value_flags |= RRDR_VALUE_RESET;
- }
- } else {
- // We must postpone processing the value and fill the result with gaps instead
- if (likely(does_storage_number_exist(n_curr))) {
- n_prev = n_curr;
- }
- }
- // add this value to grouping
- if(likely(!isnan(value)))
- r->internal.grouping_add(r, value);
+#ifdef NETDATA_INTERNAL_CHECKS
+#define query_point_set_id(point, point_id) (point).id = point_id
+#else
+#define query_point_set_id(point, point_id) debug_dummy()
+#endif
- values_in_group++;
- db_points_read++;
- }
+typedef struct query_plan_entry {
+ size_t tier;
+ time_t after;
+ time_t before;
+} QUERY_PLAN_ENTRY;
- if (0 == values_in_group) {
- // add NAN to grouping
- r->internal.grouping_add(r, NAN);
- }
+typedef struct query_plan {
+ size_t entries;
+ QUERY_PLAN_ENTRY data[RRD_STORAGE_TIERS*2];
+} QUERY_PLAN;
+
+typedef struct query_engine_ops {
+ // configuration
+ RRDR *r;
+ RRDDIM *rd;
+ time_t view_update_every;
+ time_t query_granularity;
+ TIER_QUERY_FETCH tier_query_fetch;
+
+ // query planner
+ QUERY_PLAN plan;
+ size_t current_plan;
+ time_t current_plan_expire_time;
+
+ // storage queries
+ size_t tier;
+ struct rrddim_tier *tier_ptr;
+ struct rrddim_query_handle handle;
+ STORAGE_POINT (*next_metric)(struct rrddim_query_handle *handle);
+ int (*is_finished)(struct rrddim_query_handle *handle);
+ void (*finalize)(struct rrddim_query_handle *handle);
- rrdr_line = rrdr_line_init(r, now, rrdr_line);
+ // aggregating points over time
+ void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
+ NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ size_t group_points_non_zero;
+ size_t group_points_added;
+ NETDATA_DOUBLE group_anomaly_rate;
+ RRDR_VALUE_FLAGS group_value_flags;
- if(unlikely(!min_date)) min_date = now;
- max_date = now;
+ // statistics
+ size_t db_total_points_read;
+ size_t db_points_read_per_tier[RRD_STORAGE_TIERS];
+} QUERY_ENGINE_OPS;
- // find the place to store our values
- RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_line * r->d + dim_id_in_rrdr];
- // update the dimension options
- if(likely(values_in_group_non_zero))
- r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+// ----------------------------------------------------------------------------
+// query planner
- // store the specific point options
- *rrdr_value_options_ptr = group_value_flags;
+#define query_plan_should_switch_plan(ops, now) ((now) >= (ops).current_plan_expire_time)
- // store the value
- value = r->internal.grouping_flush(r, rrdr_value_options_ptr);
- r->v[rrdr_line * r->d + dim_id_in_rrdr] = value;
+static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, time_t overwrite_after) {
+ if(unlikely(plan_id >= ops->plan.entries))
+ plan_id = ops->plan.entries - 1;
- if(likely(points_added || dim_id_in_rrdr)) {
- // find the min/max across all dimensions
+ time_t after = ops->plan.data[plan_id].after;
+ time_t before = ops->plan.data[plan_id].before;
- if(unlikely(value < min)) min = value;
- if(unlikely(value > max)) max = value;
+ if(overwrite_after > after && overwrite_after < before)
+ after = overwrite_after;
- }
- else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
- // so, on the first point added for the query.
- min = max = value;
+ ops->tier = ops->plan.data[plan_id].tier;
+ ops->tier_ptr = ops->rd->tiers[ops->tier];
+ ops->tier_ptr->query_ops.init(ops->tier_ptr->db_metric_handle, &ops->handle, after, before, ops->r->internal.tier_query_fetch);
+ ops->next_metric = ops->tier_ptr->query_ops.next_metric;
+ ops->is_finished = ops->tier_ptr->query_ops.is_finished;
+ ops->finalize = ops->tier_ptr->query_ops.finalize;
+ ops->current_plan = plan_id;
+ ops->current_plan_expire_time = ops->plan.data[plan_id].before;
+}
+
+static void query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t last_point_end_time) {
+ internal_error(now < ops->current_plan_expire_time && now < ops->plan.data[ops->current_plan].before,
+ "QUERY: switching query plan too early!");
+
+ time_t next_plan_before_time;
+ do {
+ ops->current_plan++;
+
+ if (ops->current_plan >= ops->plan.entries) {
+ ops->current_plan = ops->plan.entries - 1;
+ return;
}
- points_added++;
- values_in_group = 0;
- group_value_flags = RRDR_VALUE_NOTHING;
- values_in_group_non_zero = 0;
+ next_plan_before_time = ops->plan.data[ops->current_plan].before;
+ } while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);
+
+ if(ops->finalize) {
+ ops->finalize(&ops->handle);
+ ops->finalize = NULL;
}
- rd->state->query_ops.finalize(&handle);
- r->internal.db_points_read += db_points_read;
- r->internal.result_points_generated += points_added;
+ query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));
- r->min = min;
- r->max = max;
- r->before = max_date;
- r->after = min_date - (r->group - 1) * dt;
- rrdr_done(r, rrdr_line);
+ // internal_error(true, "QUERY: switched plan to %zu (all is %zu), previous expiration was %ld, this starts at %ld, now is %ld, last_point_end_time %ld", ops->current_plan, ops->plan.entries, ops->plan.data[ops->current_plan-1].before, ops->plan.data[ops->current_plan].after, now, last_point_end_time);
+}
- #ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(r->rows != points_added))
- error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
- #endif
+static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
+ QUERY_PLAN_ENTRY *p1 = (QUERY_PLAN_ENTRY *)a;
+ QUERY_PLAN_ENTRY *p2 = (QUERY_PLAN_ENTRY *)b;
+ return (p1->after < p2->after)?-1:1;
}
-static inline void do_dimension_fixedstep(
- RRDR *r
- , long points_wanted
- , RRDDIM *rd
- , long dim_id_in_rrdr
- , time_t after_wanted
- , time_t before_wanted
- , uint32_t options
-){
- time_t now = after_wanted,
- dt = r->update_every / r->group, /* usually is st->update_every */
- max_date = 0,
- min_date = 0;
+static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, long points_wanted) {
+ RRDDIM *rd = ops->rd;
- long group_size = r->group,
- points_added = 0,
- values_in_group = 0,
- values_in_group_non_zero = 0,
- rrdr_line = -1;
+ //BUFFER *wb = buffer_create(1000);
+ //buffer_sprintf(wb, "QUERY PLAN for chart '%s' dimension '%s', from %ld to %ld:", rd->rrdset->name, rd->name, after_wanted, before_wanted);
- RRDR_VALUE_FLAGS group_value_flags = RRDR_VALUE_NOTHING;
+ // put our selected tier as the first plan
+ size_t selected_tier;
- struct rrddim_query_handle handle;
+ if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER && ops->r->internal.query_tier >= 0 && ops->r->internal.query_tier < storage_tiers) {
+ selected_tier = ops->r->internal.query_tier;
+ }
+ else {
- calculated_number min = r->min, max = r->max;
- size_t db_points_read = 0;
- time_t db_now = now;
- time_t first_time_t = rrddim_first_entry_t(rd);
+ selected_tier = rrddim_find_best_tier_for_timeframe(rd, after_wanted, before_wanted, points_wanted);
- // cache the function pointers we need in the loop
- storage_number (*next_metric)(struct rrddim_query_handle *handle, time_t *current_time) = rd->state->query_ops.next_metric;
- void (*grouping_add)(struct rrdresult *r, calculated_number value) = r->internal.grouping_add;
- calculated_number (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) = r->internal.grouping_flush;
- RRD_MEMORY_MODE rrd_memory_mode = rd->rrd_memory_mode;
+ if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
+ ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
+ }
- for(rd->state->query_ops.init(rd, &handle, now, before_wanted) ; points_added < points_wanted ; now += dt) {
- // make sure we return data in the proper time range
- if(unlikely(now > before_wanted)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
-#endif
- break;
+ ops->plan.entries = 1;
+ ops->plan.data[0].tier = selected_tier;
+ ops->plan.data[0].after = rd->tiers[selected_tier]->query_ops.oldest_time(rd->tiers[selected_tier]->db_metric_handle);
+ ops->plan.data[0].before = rd->tiers[selected_tier]->query_ops.latest_time(rd->tiers[selected_tier]->db_metric_handle);
+
+ if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
+ // the selected tier
+ time_t selected_tier_first_time_t = ops->plan.data[0].after;
+ time_t selected_tier_last_time_t = ops->plan.data[0].before;
+
+ //buffer_sprintf(wb, ": SELECTED tier %zu, from %ld to %ld", selected_tier, ops->plan.data[0].after, ops->plan.data[0].before);
+
+ // check if our selected tier can start the query
+ if (selected_tier_first_time_t > after_wanted) {
+ // we need some help from other tiers
+ for (int tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
+ // find the first time of this tier
+ time_t first_time_t = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
+
+ //buffer_sprintf(wb, ": EVAL AFTER tier %d, %ld", tier, first_time_t);
+
+ // can it help?
+ if (first_time_t < selected_tier_first_time_t) {
+ // it can help us add detail at the beginning of the query
+ QUERY_PLAN_ENTRY t = {
+ .tier = tr,
+ .after = (first_time_t < after_wanted) ? after_wanted : first_time_t,
+ .before = selected_tier_first_time_t};
+ ops->plan.data[ops->plan.entries++] = t;
+
+ // prepare for the tier
+ selected_tier_first_time_t = t.after;
+
+ if (t.after <= after_wanted)
+ break;
+ }
+ }
}
- if(unlikely(now < after_wanted)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "skipped, because attempted to access the db before 'wanted after'";
-#endif
- continue;
+ // check if our selected tier can finish the query
+ if (selected_tier_last_time_t < before_wanted) {
+ // we need some help from other tiers
+ for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
+ // find the last time of this tier
+ time_t last_time_t = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
+
+ //buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_t);
+
+ // can it help?
+ if (last_time_t > selected_tier_last_time_t) {
+ // it can help us add detail at the end of the query
+ QUERY_PLAN_ENTRY t = {
+ .tier = tr,
+ .after = selected_tier_last_time_t,
+ .before = (last_time_t > before_wanted) ? before_wanted : last_time_t};
+ ops->plan.data[ops->plan.entries++] = t;
+
+ // prepare for the tier
+ selected_tier_last_time_t = t.before;
+
+ if (t.before >= before_wanted)
+ break;
+ }
+ }
}
+ }
- // read the value from the database
- //storage_number n = rd->values[slot];
+ // sort the query plan
+ if(ops->plan.entries > 1)
+ qsort(&ops->plan.data, ops->plan.entries, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);
-#ifdef NETDATA_INTERNAL_CHECKS
- struct mem_query_handle* mem_handle = (struct mem_query_handle*)handle.handle;
- if ((rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE) &&
- (rrdset_time2slot(r->st, now) != (long unsigned)(mem_handle->slot))) {
- error("INTERNAL CHECK: Unaligned query for %s, database slot: %lu, expected slot: %lu", rd->id, (long unsigned)mem_handle->slot, rrdset_time2slot(r->st, now));
- }
-#endif
+ // make sure it has the whole timeframe we need
+ ops->plan.data[0].after = after_wanted;
+ ops->plan.data[ops->plan.entries - 1].before = before_wanted;
- db_now = now; // this is needed to set db_now in case the next_metric implementation does not set it
+ //buffer_sprintf(wb, ": FINAL STEPS %zu", ops->plan.entries);
- storage_number n;
- calculated_number value;
+ //for(size_t i = 0; i < ops->plan.entries ;i++)
+ // buffer_sprintf(wb, ": STEP %zu = use tier %zu from %ld to %ld", i+1, ops->plan.data[i].tier, ops->plan.data[i].after, ops->plan.data[i].before);
- if (unlikely(rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE && now <= first_time_t)) {
- n = SN_EMPTY_SLOT;
- value = NAN;
- }
- else {
- // load the metric value
- n = next_metric(&handle, &db_now);
- db_points_read++;
-
- // and unpack it
- if(likely(does_storage_number_exist(n))) {
- if (options & RRDR_OPTION_ANOMALY_BIT)
- value = (n & SN_ANOMALY_BIT) ? 0.0 : 100.0;
- else
- value = unpack_storage_number(n);
- }
- else
- value = NAN;
- }
+ //internal_error(true, "%s", buffer_tostring(wb));
- if(unlikely(db_now > before_wanted)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- r->internal.log = "stopped, because attempted to access the db after 'wanted before'";
-#endif
- break;
- }
+ query_planer_activate_plan(ops, 0, 0);
+}
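+
+// Example plan (illustrative): if the selected tier 1 holds [1000, 2000]
+// but the query wants [500, 2100], a coarser tier reaching back to 400
+// contributes [500, 1000] and the finer tier 0, holding data up to 2100,
+// contributes [2000, 2100]. Sorted by 'after' and clamped, the plan is:
+//
+//     tier 2: 500 - 1000, tier 1: 1000 - 2000, tier 0: 2000 - 2100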
- // this loop exists only to fill nulls
- // so, if there is a value already, we use it for the first iteration
- // but the following iterations will just fill nulls to the destination
- for ( ; now <= db_now ; now += dt, value = NAN, n = SN_EMPTY_SLOT) {
- if(likely(does_storage_number_exist(n))) {
-
-#if defined(NETDATA_INTERNAL_CHECKS) && defined(ENABLE_DBENGINE)
- if(now >= db_now) {
- struct rrdeng_query_handle *rrd_handle = (struct rrdeng_query_handle *)handle.handle;
- if ((rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) && (now != rrd_handle->now))
- error(
- "INTERNAL CHECK: Unaligned query for %s, database time: %ld, expected time: %ld",
- rd->id,
- (long)rrd_handle->now,
- (long)now);
- }
-#endif
- if(likely(value != 0.0))
- values_in_group_non_zero++;
+// ----------------------------------------------------------------------------
+// dimension level query engine
+
+#define query_interpolate_point(this_point, last_point, now) do { \
+ if(likely( \
+ /* the point to interpolate is more than 1s wide */ \
+ (this_point).end_time - (this_point).start_time > 1 \
+ \
+ /* the two points are exactly next to each other */ \
+ && (last_point).end_time == (this_point).start_time \
+ \
+ /* both points are valid numbers */ \
+ && netdata_double_isnumber((this_point).value) \
+ && netdata_double_isnumber((last_point).value) \
+ \
+ )) { \
+ (this_point).value = (last_point).value + ((this_point).value - (last_point).value) * (1.0 - (NETDATA_DOUBLE)((this_point).end_time - (now)) / (NETDATA_DOUBLE)((this_point).end_time - (this_point).start_time)); \
+ (this_point).end_time = now; \
+ } \
+} while(0)
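+
+// Worked example (illustrative): last_point ended at t=100 with value 10,
+// this_point spans (100, 110] with value 20, and now=105. The factor is
+// 1.0 - (110 - 105) / (110 - 100) = 0.5, so the point becomes
+// 10 + (20 - 10) * 0.5 = 15 with end_time 105, i.e. a linear
+// interpolation between the two samples at the view slot's boundary.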
+
+#define query_add_point_to_group(r, point, ops) do { \
+ if(likely(netdata_double_isnumber((point).value))) { \
+ if(likely(fpclassify((point).value) != FP_ZERO)) \
+ (ops).group_points_non_zero++; \
+ \
+ if(unlikely((point).flags & SN_FLAG_RESET)) \
+ (ops).group_value_flags |= RRDR_VALUE_RESET; \
+ \
+ (ops).grouping_add(r, (point).value); \
+ } \
+ \
+ (ops).group_points_added++; \
+ (ops).group_anomaly_rate += (point).anomaly; \
+} while(0)
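+
+// Example (illustrative): grouping db points with values 4, 0 and 2 and
+// anomaly rates 0, 100 and 50 calls grouping_add() for all three (a NaN
+// would be skipped), counts 2 of them as non-zero, and accumulates
+// group_anomaly_rate = 150; the caller divides this by the 3 points
+// added to store an average anomaly rate of 50 for the group.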
+
+static inline void rrd2rrdr_do_dimension(
+ RRDR *r
+ , long points_wanted
+ , RRDDIM *rd
+ , long dim_id_in_rrdr
+ , time_t after_wanted
+ , time_t before_wanted
+){
+ time_t max_date = 0,
+ min_date = 0;
+
+ size_t points_added = 0;
+
+ QUERY_ENGINE_OPS ops = {
+ .r = r,
+ .rd = rd,
+ .grouping_add = r->internal.grouping_add,
+ .grouping_flush = r->internal.grouping_flush,
+ .tier_query_fetch = r->internal.tier_query_fetch,
+ .view_update_every = r->update_every,
+ .query_granularity = r->update_every / r->group,
+ .group_value_flags = RRDR_VALUE_NOTHING
+ };
+
+ long rrdr_line = -1;
+ bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
+
+ query_plan(&ops, after_wanted, before_wanted, points_wanted);
+
+ NETDATA_DOUBLE min = r->min, max = r->max;
+
+ QUERY_POINT last2_point = QUERY_POINT_EMPTY;
+ QUERY_POINT last1_point = QUERY_POINT_EMPTY;
+ QUERY_POINT new_point = QUERY_POINT_EMPTY;
+
+ time_t now_start_time = after_wanted - ops.query_granularity;
+ time_t now_end_time = after_wanted + ops.view_update_every - ops.query_granularity;
- if(unlikely(did_storage_number_reset(n)))
- group_value_flags |= RRDR_VALUE_RESET;
+ // the main loop, advancing the view window by view_update_every on each iteration
+ for( ; (long)points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops.view_update_every) {
- grouping_add(r, value);
+ if(query_plan_should_switch_plan(ops, now_end_time))
+ query_planer_next_plan(&ops, now_end_time, new_point.end_time);
+
+ // read all the points of the db, prior to the time we need (now_end_time)
+
+ size_t count_same_end_time = 0;
+ while(count_same_end_time < 100) {
+ if(likely(count_same_end_time == 0)) {
+ last2_point = last1_point;
+ last1_point = new_point;
}
- // add this value for grouping
- values_in_group++;
+ if(unlikely(ops.is_finished(&ops.handle))) {
+ if(count_same_end_time != 0) {
+ last2_point = last1_point;
+ last1_point = new_point;
+ }
+ new_point = QUERY_POINT_EMPTY;
+ new_point.start_time = last1_point.end_time;
+ new_point.end_time = now_end_time;
+ break;
+ }
+
+ // fetch the new point
+ {
+ STORAGE_POINT sp = ops.next_metric(&ops.handle);
+
+ ops.db_points_read_per_tier[ops.tier]++;
+ ops.db_total_points_read++;
+
+ new_point.start_time = sp.start_time;
+ new_point.end_time = sp.end_time;
+ new_point.anomaly = sp.count ? (NETDATA_DOUBLE)sp.anomaly_count * 100.0 / (NETDATA_DOUBLE)sp.count : 0.0;
+ query_point_set_id(new_point, ops.db_total_points_read);
+
+ // set the right value to the point we got
+ if(likely(!storage_point_is_unset(sp) && !storage_point_is_empty(sp))) {
- if(unlikely(values_in_group == group_size)) {
- rrdr_line = rrdr_line_init(r, now, rrdr_line);
- size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
+ if(unlikely(use_anomaly_bit_as_value))
+ new_point.value = new_point.anomaly;
- if(unlikely(!min_date)) min_date = now;
- max_date = now;
+ else {
+ switch (ops.tier_query_fetch) {
+ default:
+ case TIER_QUERY_FETCH_AVERAGE:
+ new_point.value = sp.sum / sp.count;
+ break;
- // find the place to store our values
- RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
+ case TIER_QUERY_FETCH_MIN:
+ new_point.value = sp.min;
+ break;
- // update the dimension options
- if(likely(values_in_group_non_zero))
- r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+ case TIER_QUERY_FETCH_MAX:
+ new_point.value = sp.max;
+ break;
- // store the specific point options
- *rrdr_value_options_ptr = group_value_flags;
+ case TIER_QUERY_FETCH_SUM:
+ new_point.value = sp.sum;
+ break;
+ };
+ }
+ }
+ else {
+ new_point.value = NAN;
+ new_point.flags = SN_FLAG_NONE;
+ }
+ }
+
+ // check if the db is giving us zero duration points
+ if(unlikely(new_point.start_time == new_point.end_time)) {
+ internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu start time %ld, end time %ld, that are both equal",
+ rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time);
- // store the group value
- calculated_number group_value = grouping_flush(r, rrdr_value_options_ptr);
- r->v[rrdr_o_v_index] = group_value;
+ new_point.start_time = new_point.end_time - ((time_t)ops.tier_ptr->tier_grouping * (time_t)ops.rd->update_every);
+ }
+
+ // check if the db is advancing the query
+ if(unlikely(new_point.end_time <= last1_point.end_time)) {
+ internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, before the last point %zu end time %ld, now is %ld to %ld",
+ rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time,
+ last1_point.id, last1_point.end_time, now_start_time, now_end_time);
+
+ count_same_end_time++;
+ continue;
+ }
+ count_same_end_time = 0;
- if(likely(points_added || dim_id_in_rrdr)) {
- // find the min/max across all dimensions
+ // decide how to use this point
+ if(likely(new_point.end_time < now_end_time)) { // likely to favor tier0
+ // this db point ends before our now_end_time
- if(unlikely(group_value < min)) min = group_value;
- if(unlikely(group_value > max)) max = group_value;
+ if(likely(new_point.end_time >= now_start_time)) { // likely to favor tier0
+ // this db point ends after our now_start time
+ query_add_point_to_group(r, new_point, ops);
}
else {
- // runs only when dim_id_in_rrdr == 0 && points_added == 0
- // so, on the first point added for the query.
- min = max = group_value;
+ // we don't need this db point
+ // it is totally outside our current time-frame
+
+ // this is desirable for the first point of the query
+ // because it allows us to interpolate the next point
+ // at exactly the time we will want
+
+ // we only log if this is not point 1
+ internal_error(new_point.end_time < after_wanted && new_point.id > 1,
+ "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, which is entirely before our current timeframe %ld to %ld (and before the entire query, after %ld, before %ld)",
+ rd->rrdset->name, rd->name,
+ new_point.id, new_point.start_time, new_point.end_time,
+ now_start_time, now_end_time,
+ after_wanted, before_wanted);
}
- points_added++;
- values_in_group = 0;
- group_value_flags = RRDR_VALUE_NOTHING;
- values_in_group_non_zero = 0;
+ }
+ else {
+ // the point ends in the future
+ // so, we will interpolate it below, at the inner loop
+ break;
}
}
- now = db_now;
+
+ if(unlikely(count_same_end_time)) {
+ internal_error(true,
+ "QUERY: the database does not advance the query, it returned an end time less or equal to the end time of the last point we got %ld, %zu times",
+ last1_point.end_time, count_same_end_time);
+
+ if(unlikely(new_point.end_time <= last1_point.end_time))
+ new_point.end_time = now_end_time;
+ }
+
+ // the inner loop
+ // we have 3 points in memory: last2, last1, new
+ // we select the one to use based on their timestamps
+
+ size_t iterations = 0;
+ for ( ; now_end_time <= new_point.end_time && (long)points_added < points_wanted ;
+ now_end_time += ops.view_update_every, iterations++) {
+
+ // now_start_time is wrong in this loop
+ // but we don't need it
+
+ QUERY_POINT current_point;
+
+ if(likely(now_end_time > new_point.start_time)) {
+ // it is time for our NEW point to be used
+ current_point = new_point;
+ query_interpolate_point(current_point, last1_point, now_end_time);
+
+ internal_error(current_point.id > 0 && last1_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
+ "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
+ " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point1 in this query.",
+ rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
+ current_point.id, current_point.start_time, current_point.end_time, now_end_time);
+ }
+ else if(likely(now_end_time <= last1_point.end_time)) {
+ // our LAST point is still valid
+ current_point = last1_point;
+ query_interpolate_point(current_point, last2_point, now_end_time);
+
+ internal_error(current_point.id > 0 && last2_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
+ "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
+ " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point2 in this query.",
+ rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
+ current_point.id, current_point.start_time, current_point.end_time, now_end_time);
+ }
+ else {
+ // a GAP, we don't have a value this time
+ current_point = QUERY_POINT_EMPTY;
+ }
+
+ query_add_point_to_group(r, current_point, ops);
+
+ rrdr_line = rrdr_line_init(r, now_end_time, rrdr_line);
+ size_t rrdr_o_v_index = rrdr_line * r->d + dim_id_in_rrdr;
+
+ if(unlikely(!min_date)) min_date = now_end_time;
+ max_date = now_end_time;
+
+ // find the place to store our values
+ RRDR_VALUE_FLAGS *rrdr_value_options_ptr = &r->o[rrdr_o_v_index];
+
+ // update the dimension options
+ if(likely(ops.group_points_non_zero))
+ r->od[dim_id_in_rrdr] |= RRDR_DIMENSION_NONZERO;
+
+ // store the specific point options
+ *rrdr_value_options_ptr = ops.group_value_flags;
+
+ // store the group value
+ NETDATA_DOUBLE group_value = ops.grouping_flush(r, rrdr_value_options_ptr);
+ r->v[rrdr_o_v_index] = group_value;
+
+ // we only store uint8_t anomaly rates,
+ // so let's get double precision by storing
+ // anomaly rates in the range 0 - 200
+ r->ar[rrdr_o_v_index] = ops.group_anomaly_rate / (NETDATA_DOUBLE)ops.group_points_added;
+
+ if(likely(points_added || dim_id_in_rrdr)) {
+ // find the min/max across all dimensions
+
+ if(unlikely(group_value < min)) min = group_value;
+ if(unlikely(group_value > max)) max = group_value;
+
+ }
+ else {
+ // runs only when dim_id_in_rrdr == 0 && points_added == 0
+ // so, on the first point added for the query.
+ min = max = group_value;
+ }
+
+ points_added++;
+ ops.group_points_added = 0;
+ ops.group_value_flags = RRDR_VALUE_NOTHING;
+ ops.group_points_non_zero = 0;
+ ops.group_anomaly_rate = 0;
+ }
+ // the loop above increased "now_end_time" by view_update_every,
+ // but the main loop will increase it too,
+ // so, let's undo the last iteration of this loop
+ if(iterations)
+ now_end_time -= ops.view_update_every;
}
- rd->state->query_ops.finalize(&handle);
+ ops.finalize(&ops.handle);
- r->internal.db_points_read += db_points_read;
r->internal.result_points_generated += points_added;
+ r->internal.db_points_read += ops.db_total_points_read;
+ for(int tr = 0; tr < storage_tiers ; tr++)
+ r->internal.tier_points_read[tr] += ops.db_points_read_per_tier[tr];
r->min = min;
r->max = max;
r->before = max_date;
- r->after = min_date - (r->group - 1) * dt;
+ r->after = min_date - ops.view_update_every + ops.query_granularity;
rrdr_done(r, rrdr_line);
-#ifdef NETDATA_INTERNAL_CHECKS
- if(unlikely(r->rows != points_added))
- error("INTERNAL ERROR: %s.%s added %zu rows, but RRDR says I added %zu.", r->st->name, rd->name, (size_t)points_added, (size_t)r->rows);
-#endif
+ internal_error((long)points_added != points_wanted,
+ "QUERY: query on %s/%s requested %zu points, but RRDR added %zu (%zu db points read).",
+ r->st->name, rd->name, (size_t)points_wanted, (size_t)points_added, ops.db_total_points_read);
+}
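+
+// Timeline example (illustrative): with view_update_every=10, the slot
+// (90, 100] may be served by a db point spanning (95, 110]: the middle
+// loop stops fetching there because the point ends in the future, the
+// inner loop interpolates it at t=100 before advancing the view, and the
+// same db point then also serves the slot (100, 110] without a new fetch.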
+
+// ----------------------------------------------------------------------------
+// fill the gap of a tier
+
+extern void store_metric_at_tier(RRDDIM *rd, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
+
+void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, int tier, time_t now) {
+ if(unlikely(tier < 0 || tier >= storage_tiers)) return;
+ if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;
+
+ struct rrddim_tier *t = rd->tiers[tier];
+ if(unlikely(!t)) return;
+
+ time_t latest_time_t = t->query_ops.latest_time(t->db_metric_handle);
+ time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
+ time_t time_diff = now - latest_time_t;
+
+ // if the user wants only NEW backfilling, and we don't have any data
+ if(storage_tiers_backfill[tier] == RRD_BACKFILL_NEW && latest_time_t <= 0) return;
+
+ // there is really nothing we can do
+ if(now <= latest_time_t || time_diff < granularity) return;
+
+ struct rrddim_query_handle handle;
+
+ size_t all_points_read = 0;
+
+ // for each lower tier
+ for(int tr = tier - 1; tr >= 0 ;tr--){
+ time_t smaller_tier_first_time = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
+ time_t smaller_tier_last_time = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
+ if(smaller_tier_last_time <= latest_time_t) continue; // it is as bad as we are
+
+ long after_wanted = (latest_time_t < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_t;
+ long before_wanted = smaller_tier_last_time;
+
+ struct rrddim_tier *tmp = rd->tiers[tr];
+ tmp->query_ops.init(tmp->db_metric_handle, &handle, after_wanted, before_wanted, TIER_QUERY_FETCH_AVERAGE);
+
+ size_t points = 0;
+
+ while(!tmp->query_ops.is_finished(&handle)) {
+
+ STORAGE_POINT sp = tmp->query_ops.next_metric(&handle);
+
+ if(sp.end_time > latest_time_t) {
+ latest_time_t = sp.end_time;
+ store_metric_at_tier(rd, t, sp, sp.end_time * USEC_PER_SEC);
+ points++;
+ }
+ }
+
+ all_points_read += points;
+ tmp->query_ops.finalize(&handle);
+
+ //internal_error(true, "DBENGINE: backfilled chart '%s', dimension '%s', tier %d, from %ld to %ld, with %zu points from tier %d",
+ // rd->rrdset->name, rd->name, tier, after_wanted, before_wanted, points, tr);
+ }
+
+ rrdr_query_completed(all_points_read, all_points_read);
}
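
// Backfill example (illustrative): if this tier has tier_grouping=60 and
// its latest point ends at 12:00 while tier 0 has data up to 12:10, the
// loop above re-reads tier 0's points in (12:00, 12:10] and feeds them
// through store_metric_at_tier(), letting a coarse tier catch up after a
// gap (e.g. an agent restart), subject to the storage_tiers_backfill mode.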
// ----------------------------------------------------------------------------
@@ -719,8 +1494,9 @@ static inline void do_dimension_fixedstep(
#ifdef NETDATA_INTERNAL_CHECKS
static void rrd2rrdr_log_request_response_metadata(RRDR *r
+ , RRDR_OPTIONS options __maybe_unused
, RRDR_GROUPING group_method
- , int aligned
+ , bool aligned
, long group
, long resampling_time
, long resampling_group
@@ -736,9 +1512,9 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
) {
netdata_rwlock_rdlock(&r->st->rrdset_rwlock);
info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, resampling_time: %ld, resampling_group: %ld), "
- "after (got: %zu, want: %zu, req: %zu, db: %zu), "
- "before (got: %zu, want: %zu, req: %zu, db: %zu), "
- "duration (got: %zu, want: %zu, req: %zu, db: %zu), "
+ "after (got: %zu, want: %zu, req: %ld, db: %zu), "
+ "before (got: %zu, want: %zu, req: %ld, db: %zu), "
+ "duration (got: %zu, want: %zu, req: %ld, db: %zu), "
//"slot (after: %zu, before: %zu, delta: %zu), "
"points (got: %ld, want: %ld, req: %ld, db: %ld), "
"%s"
@@ -755,19 +1531,19 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
// after
, (size_t)r->after
, (size_t)after_wanted
- , (size_t)after_requested
+ , after_requested
, (size_t)rrdset_first_entry_t_nolock(r->st)
// before
, (size_t)r->before
, (size_t)before_wanted
- , (size_t)before_requested
+ , before_requested
, (size_t)rrdset_last_entry_t_nolock(r->st)
// duration
, (size_t)(r->before - r->after + r->st->update_every)
, (size_t)(before_wanted - after_wanted + r->st->update_every)
- , (size_t)(before_requested - after_requested)
+ , before_requested - after_requested
, (size_t)((rrdset_last_entry_t_nolock(r->st) - rrdset_first_entry_t_nolock(r->st)) + r->st->update_every)
// slot
@@ -791,79 +1567,88 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
#endif // NETDATA_INTERNAL_CHECKS
// Returns 1 if an absolute period was requested or 0 if it was a relative period
-static int rrdr_convert_before_after_to_absolute(
- long long *after_requestedp
- , long long *before_requestedp
- , int update_every
- , time_t first_entry_t
- , time_t last_entry_t
- , RRDR_OPTIONS options
-) {
+int rrdr_relative_window_to_absolute(long long *after, long long *before) {
+ time_t now = now_realtime_sec() - 1;
+
int absolute_period_requested = -1;
long long after_requested, before_requested;
- before_requested = *before_requestedp;
- after_requested = *after_requestedp;
-
- if(before_requested == 0 && after_requested == 0) {
- // dump the all the data
- before_requested = last_entry_t;
- after_requested = first_entry_t;
- absolute_period_requested = 0;
- }
+ before_requested = *before;
+ after_requested = *after;
// allow relative for before (smaller than API_RELATIVE_TIME_MAX)
if(ABS(before_requested) <= API_RELATIVE_TIME_MAX) {
- if(ABS(before_requested) % update_every) {
- // make sure it is multiple of st->update_every
- if(before_requested < 0) before_requested = before_requested - update_every -
- before_requested % update_every;
- else before_requested = before_requested + update_every - before_requested % update_every;
- }
- if(before_requested > 0) before_requested = first_entry_t + before_requested;
- else before_requested = last_entry_t + before_requested; //last_entry_t is not really now_t
- //TODO: fix before_requested to be relative to now_t
+ // if the user asked for a positive relative time,
+ // flip it to a negative
+ if(before_requested > 0)
+ before_requested = -before_requested;
+
+ before_requested = now + before_requested;
absolute_period_requested = 0;
}
// allow relative for after (smaller than API_RELATIVE_TIME_MAX)
if(ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
- if(after_requested == 0) after_requested = -update_every;
- if(ABS(after_requested) % update_every) {
- // make sure it is multiple of st->update_every
- if(after_requested < 0) after_requested = after_requested - update_every - after_requested % update_every;
- else after_requested = after_requested + update_every - after_requested % update_every;
- }
- after_requested = before_requested + after_requested;
+ if(after_requested > 0)
+ after_requested = -after_requested;
+
+ // if the user didn't give an after, default to querying
+ // the last 600 seconds
+ if(after_requested == 0)
+ after_requested = -600;
+
+ // since the query engine now returns inclusive timestamps
+ // it is awkward to return 6 points when after=-5 is given
+ // so for relative queries we add 1 second, to give
+ // more predictable results to users.
+ after_requested = before_requested + after_requested + 1;
absolute_period_requested = 0;
}
if(absolute_period_requested == -1)
absolute_period_requested = 1;
- // make sure they are within our timeframe
- if(before_requested > last_entry_t) before_requested = last_entry_t;
- if(before_requested < first_entry_t && !(options & RRDR_OPTION_ALLOW_PAST))
- before_requested = first_entry_t;
-
- if(after_requested > last_entry_t) after_requested = last_entry_t;
- if(after_requested < first_entry_t && !(options & RRDR_OPTION_ALLOW_PAST))
- after_requested = first_entry_t;
-
- // check if they are reversed
+ // check if the parameters are flipped
if(after_requested > before_requested) {
- time_t tmp = before_requested;
+ long long t = before_requested;
before_requested = after_requested;
- after_requested = tmp;
+ after_requested = t;
+ }
+
+ // if the query requests future data
+ // shift the query back to be in the present time
+ // (this may also happen because of the rules above)
+ if(before_requested > now) {
+ long long delta = before_requested - now;
+ before_requested -= delta;
+ after_requested -= delta;
}
- *before_requestedp = before_requested;
- *after_requestedp = after_requested;
+ *before = before_requested;
+ *after = after_requested;
return absolute_period_requested;
}
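
// Usage sketch (illustrative): resolving "the last 10 minutes".
//
//     long long after = -600, before = 0;
//     int absolute = rrdr_relative_window_to_absolute(&after, &before);
//     // absolute == 0 (a relative period was given); before resolves to
//     // the current wall clock minus 1s and after to before - 599, an
//     // inclusive 600-second window ready for rrd2rrdr().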
-static RRDR *rrd2rrdr_fixedstep(
+// #define DEBUG_QUERY_LOGIC 1
+
+#ifdef DEBUG_QUERY_LOGIC
+#define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
+#define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
+#define query_debug_log_fin() { \
+ info("QUERY: chart '%s', after:%lld, before:%lld, duration:%lld, points:%ld, res:%ld - wanted => after:%lld, before:%lld, points:%ld, group:%ld, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", st->name, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
+ buffer_free(debug_log); \
+ debug_log = NULL; \
+ }
+#define query_debug_log_free() do { buffer_free(debug_log); } while(0)
+#else
+#define query_debug_log_init() debug_dummy()
+#define query_debug_log(args...) debug_dummy()
+#define query_debug_log_fin() debug_dummy()
+#define query_debug_log_free() debug_dummy()
+#endif
+
+RRDR *rrd2rrdr(
ONEWAYALLOC *owa
, RRDSET *st
, long points_requested
@@ -873,564 +1658,272 @@ static RRDR *rrd2rrdr_fixedstep(
, long resampling_time_requested
, RRDR_OPTIONS options
, const char *dimensions
- , int update_every
- , time_t first_entry_t
- , time_t last_entry_t
- , int absolute_period_requested
, struct context_param *context_param_list
+ , const char *group_options
, int timeout
+ , int tier
) {
- int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
-
- // the duration of the chart
- time_t duration = before_requested - after_requested;
- long available_points = duration / update_every;
-
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
-
- if(duration <= 0 || available_points <= 0)
- return rrdr_create(owa, st, 1, context_param_list);
-
- // check the number of wanted points in the result
- if(unlikely(points_requested < 0)) points_requested = -points_requested;
- if(unlikely(points_requested > available_points)) points_requested = available_points;
- if(unlikely(points_requested == 0)) points_requested = available_points;
-
- // calculate the desired grouping of source data points
- long group = available_points / points_requested;
- if(unlikely(group <= 0)) group = 1;
- if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
-
- // resampling_time_requested enforces a certain grouping multiple
- calculated_number resampling_divisor = 1.0;
- long resampling_group = 1;
- if(unlikely(resampling_time_requested > update_every)) {
- if (unlikely(resampling_time_requested > duration)) {
- // group_time is above the available duration
-
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
- #endif
-
- after_requested = before_requested - resampling_time_requested;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
-
- // if the duration is not aligned to resampling time
- // extend the duration to the past, to avoid a gap at the chart
- // only when the missing duration is above 1/10th of a point
- if(duration % resampling_time_requested) {
- time_t delta = duration % resampling_time_requested;
- if(delta > resampling_time_requested / 10) {
- after_requested -= resampling_time_requested - delta;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
- }
-
- // the points we should group to satisfy gtime
- resampling_group = resampling_time_requested / update_every;
- if(unlikely(resampling_time_requested % update_every)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
- #endif
-
- resampling_group++;
- }
-
- // adapt group according to resampling_group
- if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
- if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
-
- //resampling_divisor = group / resampling_group;
- resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
- }
-
- // now that we have group,
- // align the requested timeframe to fit it.
-
- if(aligned) {
- // alignment has been requested, so align the values
- before_requested -= before_requested % (group * update_every);
- after_requested -= after_requested % (group * update_every);
- }
-
- // we align the request on requested_before
- time_t before_wanted = before_requested;
- if(likely(before_wanted > last_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
- #endif
-
- before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
- }
- //size_t before_slot = rrdset_time2slot(st, before_wanted);
-
- // we need to estimate the number of points, for having
- // an integer number of values per point
- long points_wanted = (before_wanted - after_requested) / (update_every * group);
-
- time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
- if(unlikely(after_wanted < first_entry_t)) {
- // hm... we go to the past, calculate again points_wanted using all the db from before_wanted to the beginning
- points_wanted = (before_wanted - first_entry_t) / group;
-
- // recalculate after wanted with the new number of points
- after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
-
- if(unlikely(after_wanted < first_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
- #endif
-
- after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
- }
- }
- //size_t after_slot = rrdset_time2slot(st, after_wanted);
-
- // check if they are reversed
- if(unlikely(after_wanted > before_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
- #endif
- time_t tmp = before_wanted;
- before_wanted = after_wanted;
- after_wanted = tmp;
+ // RULES
+ // points_requested = 0
+ // the user wants all the natural points the database has
+ //
+ // after_requested = 0
+ // the user wants to start the query from the oldest point in our database
+ //
+ // before_requested = 0
+ // the user wants the query to end to the latest point in our database
+ //
+ // when natural points are wanted, the query has to be aligned to the update_every
+ // of the database
+
+ long points_wanted = points_requested;
+ long long after_wanted = after_requested;
+ long long before_wanted = before_requested;
+ int update_every = st->update_every;
+
+ bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+ bool automatic_natural_points = (points_wanted == 0);
+ bool relative_period_requested = false;
+ bool natural_points = (options & RRDR_OPTION_NATURAL_POINTS) || automatic_natural_points;
+ bool before_is_aligned_to_db_end = false;
+
+ query_debug_log_init();
+
+ // make sure points_wanted is positive
+ if(points_wanted < 0) {
+ points_wanted = -points_wanted;
+ query_debug_log(":-points_wanted %ld", points_wanted);
}
- // recalculate points_wanted using the final time-frame
- points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
- if(unlikely(points_wanted < 0)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
- #endif
- points_wanted = 0;
+ if(ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
+ relative_period_requested = true;
+ natural_points = true;
+ options |= RRDR_OPTION_NATURAL_POINTS;
+ query_debug_log(":relative+natural");
}
-#ifdef NETDATA_INTERNAL_CHECKS
- duration = before_wanted - after_wanted;
-
- if(after_wanted < first_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
-
- if(after_wanted > last_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
-
- if(before_wanted < first_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
-
- if(before_wanted > last_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
-
-/*
- if(before_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
+ // if the user wants virtual points, make sure we do it
+ if(options & RRDR_OPTION_VIRTUAL_POINTS)
+ natural_points = false;
- if(after_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
-*/
+ // set the right flag about natural and virtual points
+ if(natural_points) {
+ options |= RRDR_OPTION_NATURAL_POINTS;
- if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
- error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
-
- if(group < resampling_group)
- error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
-
- if(group > resampling_group && group % resampling_group)
- error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group);
-#endif
-
- // -------------------------------------------------------------------------
- // initialize our result set
- // this also locks the chart for us
-
- RRDR *r = rrdr_create(owa, st, points_wanted, context_param_list);
- if(unlikely(!r)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
- #endif
- return NULL;
+ if(options & RRDR_OPTION_VIRTUAL_POINTS)
+ options &= ~RRDR_OPTION_VIRTUAL_POINTS;
}
+ else {
+ options |= RRDR_OPTION_VIRTUAL_POINTS;
- if(unlikely(!r->d || !points_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
- #endif
- return r;
+ if(options & RRDR_OPTION_NATURAL_POINTS)
+ options &= ~RRDR_OPTION_NATURAL_POINTS;
}
- if(unlikely(absolute_period_requested == 1))
- r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
- else
- r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
-
- // find how many dimensions we have
- long dimensions_count = r->d;
-
- // -------------------------------------------------------------------------
- // initialize RRDR
-
- r->group = group;
- r->update_every = (int)group * update_every;
- r->before = before_wanted;
- r->after = after_wanted;
- r->internal.points_wanted = points_wanted;
- r->internal.resampling_group = resampling_group;
- r->internal.resampling_divisor = resampling_divisor;
+ if(after_wanted == 0 || before_wanted == 0) {
+ // for non-context queries we have to find the duration of the database
+ // for context queries we will assume 600 seconds duration
+ if(!context_param_list) {
+ relative_period_requested = true;
- // -------------------------------------------------------------------------
- // assign the processor functions
+ rrdset_rdlock(st);
+ time_t first_entry_t = rrdset_first_entry_t_nolock(st);
+ time_t last_entry_t = rrdset_last_entry_t_nolock(st);
+ rrdset_unlock(st);
- {
- int i, found = 0;
- for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
- if(api_v1_data_groups[i].value == group_method) {
- r->internal.grouping_create= api_v1_data_groups[i].create;
- r->internal.grouping_reset = api_v1_data_groups[i].reset;
- r->internal.grouping_free = api_v1_data_groups[i].free;
- r->internal.grouping_add = api_v1_data_groups[i].add;
- r->internal.grouping_flush = api_v1_data_groups[i].flush;
- found = 1;
+ if(first_entry_t == 0 || last_entry_t == 0) {
+ internal_error(true, "QUERY: chart without data detected on '%s'", st->name);
+ query_debug_log_free();
+ return NULL;
}
- }
- if(!found) {
- errno = 0;
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
- #endif
- r->internal.grouping_create= grouping_create_average;
- r->internal.grouping_reset = grouping_reset_average;
- r->internal.grouping_free = grouping_free_average;
- r->internal.grouping_add = grouping_add_average;
- r->internal.grouping_flush = grouping_flush_average;
- }
- }
-
- // allocate any memory required by the grouping method
- r->internal.grouping_create(r);
-
-
- // -------------------------------------------------------------------------
- // disable the not-wanted dimensions
-
- if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
- rrdset_check_rdlock(st);
-
- if(dimensions)
- rrdr_disable_not_selected_dimensions(r, options, dimensions, context_param_list);
-
-
- // -------------------------------------------------------------------------
- // do the work for each dimension
-
- time_t max_after = 0, min_before = 0;
- long max_rows = 0;
-
- RRDDIM *rd;
- long c, dimensions_used = 0, dimensions_nonzero = 0;
- struct timeval query_start_time;
- struct timeval query_current_time;
- if (timeout)
- now_realtime_timeval(&query_start_time);
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
-
- // if we need a percentage, we need to calculate all dimensions
- if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
- continue;
- }
- r->od[c] |= RRDR_DIMENSION_SELECTED;
-
- // reset the grouping for the new dimension
- r->internal.grouping_reset(r);
-
- do_dimension_fixedstep(
- r
- , points_wanted
- , rd
- , c
- , after_wanted
- , before_wanted
- , options
- );
- if (timeout)
- now_realtime_timeval(&query_current_time);
- if(r->od[c] & RRDR_DIMENSION_NONZERO)
- dimensions_nonzero++;
+ query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_t, last_entry_t);
- // verify all dimensions are aligned
- if(unlikely(!dimensions_used)) {
- min_before = r->before;
- max_after = r->after;
- max_rows = r->rows;
- }
- else {
- if(r->after != max_after) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_after, rd->name, (size_t)r->after);
- #endif
- r->after = (r->after > max_after) ? r->after : max_after;
+ if (after_wanted == 0) {
+ after_wanted = first_entry_t;
+ query_debug_log(":zero after_wanted %lld", after_wanted);
}
- if(r->before != min_before) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)min_before, rd->name, (size_t)r->before);
- #endif
- r->before = (r->before < min_before) ? r->before : min_before;
+ if (before_wanted == 0) {
+ before_wanted = last_entry_t;
+ before_is_aligned_to_db_end = true;
+ query_debug_log(":zero before_wanted %lld", before_wanted);
}
- if(r->rows != max_rows) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
- #endif
- r->rows = (r->rows > max_rows) ? r->rows : max_rows;
+ if(points_wanted == 0) {
+ points_wanted = (last_entry_t - first_entry_t) / update_every;
+ query_debug_log(":zero points_wanted %ld", points_wanted);
}
}
- dimensions_used++;
- if (timeout && (dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
- log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %d ms)",
- dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
- r->result_options |= RRDR_RESULT_OPTION_CANCEL;
- break;
+ // if they are still zero, assume 600
+
+ if(after_wanted == 0) {
+ after_wanted = -600;
+ query_debug_log(":zero600 after_wanted %lld", after_wanted);
}
}
- #ifdef NETDATA_INTERNAL_CHECKS
- if (dimensions_used) {
- if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
-
- if(r->rows != points_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
-
- if(aligned && (r->before % group) != 0)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+ if(points_wanted == 0) {
+ points_wanted = 600;
+ query_debug_log(":zero600 points_wanted %ld", points_wanted);
+ }
- // 'after' should not be aligned, since we start inside the first group
- //if(aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+ // convert our before_wanted and after_wanted to absolute
+ rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
+ query_debug_log(":relative2absolute after %lld, before %lld", after_wanted, before_wanted);
- if(r->before != before_requested)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+ if(natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
+ update_every = rrdset_find_natural_update_every_for_timeframe(st, after_wanted, before_wanted, points_wanted, options, tier);
+ if(update_every <= 0) update_every = st->update_every;
+ query_debug_log(":natural update every %d", update_every);
+ }
- if(r->before != before_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+ // this is the update_every of the query
+ // it may be different to the update_every of the database
+ time_t query_granularity = (natural_points)?update_every:1;
+ if(query_granularity <= 0) query_granularity = 1;
+ query_debug_log(":query_granularity %ld", query_granularity);
- // reported 'after' varies, depending on group
- if(r->after != after_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ // align before_wanted and after_wanted to query_granularity
+ if (before_wanted % query_granularity) {
+ before_wanted -= before_wanted % query_granularity;
+ query_debug_log(":granularity align before_wanted %lld", before_wanted);
}
- #endif
- // free all resources used by the grouping method
- r->internal.grouping_free(r);
+ if (after_wanted % query_granularity) {
+ after_wanted -= after_wanted % query_granularity;
+ query_debug_log(":granularity align after_wanted %lld", after_wanted);
+ }
- // when all the dimensions are zero, we should return all of them
- if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
- // all the dimensions are zero
- // mark them as NONZERO to send them all
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
- r->od[c] |= RRDR_DIMENSION_NONZERO;
- }
+ // automatic_natural_points is set when the user wants all the points available in the database
+ if(automatic_natural_points) {
+ points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
+ if(unlikely(points_wanted <= 0)) points_wanted = 1;
+ query_debug_log(":auto natural points_wanted %ld", points_wanted);
}
- rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
- return r;
-}
+ time_t duration = before_wanted - after_wanted;
-#ifdef ENABLE_DBENGINE
-static RRDR *rrd2rrdr_variablestep(
- ONEWAYALLOC *owa
- , RRDSET *st
- , long points_requested
- , long long after_requested
- , long long before_requested
- , RRDR_GROUPING group_method
- , long resampling_time_requested
- , RRDR_OPTIONS options
- , const char *dimensions
- , int update_every
- , time_t first_entry_t
- , time_t last_entry_t
- , int absolute_period_requested
- , struct rrdeng_region_info *region_info_array
- , struct context_param *context_param_list
- , int timeout
-) {
- int aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
+ // if the resampling time is too big, extend the duration to the past
+ if (unlikely(resampling_time_requested > duration)) {
+ after_wanted = before_wanted - resampling_time_requested;
+ duration = before_wanted - after_wanted;
+ query_debug_log(":resampling after_wanted %lld", after_wanted);
+ }
- // the duration of the chart
- time_t duration = before_requested - after_requested;
- long available_points = duration / update_every;
+ // if the duration is not aligned to resampling time
+ // extend the duration to the past, to avoid a gap at the chart
+ // only when the missing duration is above 1/10th of a point
+ if(resampling_time_requested > query_granularity && duration % resampling_time_requested) {
+ time_t delta = duration % resampling_time_requested;
+ if(delta > resampling_time_requested / 10) {
+ after_wanted -= resampling_time_requested - delta;
+ duration = before_wanted - after_wanted;
+ query_debug_log(":resampling2 after_wanted %lld", after_wanted);
+ }
+ }
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
+ // the available points of the query
+ long points_available = (duration + 1) / query_granularity;
+ if(unlikely(points_available <= 0)) points_available = 1;
+ query_debug_log(":points_available %ld", points_available);
- if(duration <= 0 || available_points <= 0) {
- freez(region_info_array);
- return rrdr_create(owa, st, 1, context_param_list);
+ if(points_wanted > points_available) {
+ points_wanted = points_available;
+ query_debug_log(":max points_wanted %ld", points_wanted);
}
- // check the number of wanted points in the result
- if(unlikely(points_requested < 0)) points_requested = -points_requested;
- if(unlikely(points_requested > available_points)) points_requested = available_points;
- if(unlikely(points_requested == 0)) points_requested = available_points;
-
// calculate the desired grouping of source data points
- long group = available_points / points_requested;
- if(unlikely(group <= 0)) group = 1;
- if(unlikely(available_points % points_requested > points_requested / 2)) group++; // rounding to the closest integer
-
- // resampling_time_requested enforces a certain grouping multiple
- calculated_number resampling_divisor = 1.0;
- long resampling_group = 1;
- if(unlikely(resampling_time_requested > update_every)) {
- if (unlikely(resampling_time_requested > duration)) {
- // group_time is above the available duration
-
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is greater than the desired duration %ld secs", st->id, resampling_time_requested, duration);
- #endif
-
- after_requested = before_requested - resampling_time_requested;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
-
- // if the duration is not aligned to resampling time
- // extend the duration to the past, to avoid a gap at the chart
- // only when the missing duration is above 1/10th of a point
- if(duration % resampling_time_requested) {
- time_t delta = duration % resampling_time_requested;
- if(delta > resampling_time_requested / 10) {
- after_requested -= resampling_time_requested - delta;
- duration = before_requested - after_requested;
- available_points = duration / update_every;
- group = available_points / points_requested;
- }
- }
+ long group = points_available / points_wanted;
+ if(group <= 0) group = 1;
- // the points we should group to satisfy gtime
- resampling_group = resampling_time_requested / update_every;
- if(unlikely(resampling_time_requested % update_every)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- info("INTERNAL CHECK: %s: requested gtime %ld secs, is not a multiple of the chart's data collection frequency %d secs", st->id, resampling_time_requested, update_every);
- #endif
-
- resampling_group++;
- }
+ // round "group" to the closest integer
+ if(points_available % points_wanted > points_wanted / 2)
+ group++;
- // adapt group according to resampling_group
- if(unlikely(group < resampling_group)) group = resampling_group; // do not allow grouping below the desired one
- if(unlikely(group % resampling_group)) group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+ query_debug_log(":group %ld", group);
- //resampling_divisor = group / resampling_group;
- resampling_divisor = (calculated_number)(group * update_every) / (calculated_number)resampling_time_requested;
- }
+ if(points_wanted * group * query_granularity < duration) {
+ // the grouping we are going to do, is not enough
+ // to cover the entire duration requested, so
+ // we have to change the number of points, to make sure we will
+        // respect the timeframe as closely as possible
- // now that we have group,
- // align the requested timeframe to fit it.
+ // let's see how many points are the optimal
+ points_wanted = points_available / group;
- if(aligned) {
- // alignment has been requested, so align the values
- before_requested -= before_requested % (group * update_every);
- after_requested -= after_requested % (group * update_every);
- }
+ if(points_wanted * group < points_available)
+ points_wanted++;
- // we align the request on requested_before
- time_t before_wanted = before_requested;
- if(likely(before_wanted > last_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, before_wanted is after db max", st->name);
- #endif
+ if(unlikely(points_wanted <= 0))
+ points_wanted = 1;
- before_wanted = last_entry_t - (last_entry_t % ( ((aligned)?group:1) * update_every ));
+ query_debug_log(":optimal points %ld", points_wanted);
}
- //size_t before_slot = rrdset_time2slot(st, before_wanted);
-
- // we need to estimate the number of points, for having
- // an integer number of values per point
- long points_wanted = (before_wanted - after_requested) / (update_every * group);
-
- time_t after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
- if(unlikely(after_wanted < first_entry_t)) {
- // hm... we go to the past, calculate again points_wanted using all the db from before_wanted to the beginning
- points_wanted = (before_wanted - first_entry_t) / group;
- // recalculate after wanted with the new number of points
- after_wanted = before_wanted - (points_wanted * group * update_every) + update_every;
+ // resampling_time_requested enforces a certain grouping multiple
+ NETDATA_DOUBLE resampling_divisor = 1.0;
+ long resampling_group = 1;
+ if(unlikely(resampling_time_requested > query_granularity)) {
+ // the points we should group to satisfy gtime
+ resampling_group = resampling_time_requested / query_granularity;
+ if(unlikely(resampling_time_requested % query_granularity))
+ resampling_group++;
- if(unlikely(after_wanted < first_entry_t)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, after_wanted is before db min", st->name);
- #endif
+ query_debug_log(":resampling group %ld", resampling_group);
- after_wanted = first_entry_t - (first_entry_t % ( ((aligned)?group:1) * update_every )) + ( ((aligned)?group:1) * update_every );
+ // adapt group according to resampling_group
+ if(unlikely(group < resampling_group)) {
+ group = resampling_group; // do not allow grouping below the desired one
+ query_debug_log(":group less res %ld", group);
+ }
+ if(unlikely(group % resampling_group)) {
+ group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
+ query_debug_log(":group mod res %ld", group);
}
- }
- //size_t after_slot = rrdset_time2slot(st, after_wanted);
-
- // check if they are reversed
- if(unlikely(after_wanted > before_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, reversed wanted after/before", st->name);
- #endif
- time_t tmp = before_wanted;
- before_wanted = after_wanted;
- after_wanted = tmp;
- }
- // recalculate points_wanted using the final time-frame
- points_wanted = (before_wanted - after_wanted) / update_every / group + 1;
- if(unlikely(points_wanted < 0)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: rrd2rrdr() on %s, points_wanted is %ld", st->name, points_wanted);
- #endif
- points_wanted = 0;
+ // resampling_divisor = group / resampling_group;
+ resampling_divisor = (NETDATA_DOUBLE)(group * query_granularity) / (NETDATA_DOUBLE)resampling_time_requested;
+ query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
}
-#ifdef NETDATA_INTERNAL_CHECKS
- duration = before_wanted - after_wanted;
-
- if(after_wanted < first_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too small, minimum %u", (uint32_t)after_wanted, (uint32_t)first_entry_t);
-
- if(after_wanted > last_entry_t)
- error("INTERNAL CHECK: after_wanted %u is too big, maximum %u", (uint32_t)after_wanted, (uint32_t)last_entry_t);
+ // now that we have group, align the requested timeframe to fit it.
+ if(aligned && before_wanted % (group * query_granularity)) {
+ if(before_is_aligned_to_db_end)
+ before_wanted -= before_wanted % (group * query_granularity);
+ else
+ before_wanted += (group * query_granularity) - before_wanted % (group * query_granularity);
+ query_debug_log(":align before_wanted %lld", before_wanted);
+ }
- if(before_wanted < first_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too small, minimum %u", (uint32_t)before_wanted, (uint32_t)first_entry_t);
+ after_wanted = before_wanted - (points_wanted * group * query_granularity) + query_granularity;
+ query_debug_log(":final after_wanted %lld", after_wanted);
- if(before_wanted > last_entry_t)
- error("INTERNAL CHECK: before_wanted %u is too big, maximum %u", (uint32_t)before_wanted, (uint32_t)last_entry_t);
+ duration = before_wanted - after_wanted;
+ query_debug_log(":final duration %ld", duration + 1);
-/*
- if(before_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: before_slot is invalid %zu, expected 0 to %ld", before_slot, st->entries - 1);
+ // check the context query based on the starting time of the query
+ if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
+ rebuild_context_param_list(owa, context_param_list, after_wanted);
+ st = context_param_list->rd ? context_param_list->rd->rrdset : NULL;
- if(after_slot >= (size_t)st->entries)
- error("INTERNAL CHECK: after_slot is invalid %zu, expected 0 to %ld", after_slot, st->entries - 1);
-*/
+ if(unlikely(!st))
+ return NULL;
+ }
- if(points_wanted > (before_wanted - after_wanted) / group / update_every + 1)
- error("INTERNAL CHECK: points_wanted %ld is more than points %ld", points_wanted, (before_wanted - after_wanted) / group / update_every + 1);
+ internal_error(points_wanted != duration / (query_granularity * group) + 1,
+ "QUERY: points_wanted %ld is not points %ld",
+ points_wanted, duration / (query_granularity * group) + 1);
- if(group < resampling_group)
- error("INTERNAL CHECK: group %ld is less than the desired group points %ld", group, resampling_group);
+ internal_error(group < resampling_group,
+ "QUERY: group %ld is less than the desired group points %ld",
+ group, resampling_group);
- if(group > resampling_group && group % resampling_group)
- error("INTERNAL CHECK: group %ld is not a multiple of the desired group points %ld", group, resampling_group);
-#endif
+ internal_error(group > resampling_group && group % resampling_group,
+ "QUERY: group %ld is not a multiple of the desired group points %ld",
+ group, resampling_group);
// -------------------------------------------------------------------------
// initialize our result set
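
Note: the group-sizing arithmetic introduced in the hunk above (integer division, rounding to the closest integer, and forcing the group to be a multiple of the resampling group) can be exercised in isolation. The sketch below only mirrors that logic for reference; compute_group() is an invented name, not a function in query.c.

    // Standalone sketch of the group-sizing logic shown in the hunk above.
    // compute_group() is a hypothetical helper, not part of query.c.
    #include <stdio.h>

    static long compute_group(long points_available, long points_wanted,
                              long resampling_group) {
        long group = points_available / points_wanted;
        if(group <= 0) group = 1;

        // round "group" to the closest integer, as the new code does
        if(points_available % points_wanted > points_wanted / 2)
            group++;

        // never group below the resampling requirement,
        // and keep group a multiple of it
        if(group < resampling_group) group = resampling_group;
        if(group % resampling_group)
            group += resampling_group - (group % resampling_group);

        return group;
    }

    int main(void) {
        // 3600 available points, 500 wanted, resampling every 10 points
        printf("group = %ld\n", compute_group(3600, 500, 10)); // prints 10
        return 0;
    }
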
@@ -1438,26 +1931,21 @@ static RRDR *rrd2rrdr_variablestep(
RRDR *r = rrdr_create(owa, st, points_wanted, context_param_list);
if(unlikely(!r)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
- #endif
- freez(region_info_array);
+ internal_error(true, "QUERY: cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld",
+ st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
return NULL;
}
if(unlikely(!r->d || !points_wanted)) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL CHECK: Returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld", st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
- #endif
- freez(region_info_array);
+ internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld",
+ st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
return r;
}
- r->result_options |= RRDR_RESULT_OPTION_VARIABLE_STEP;
- if(unlikely(absolute_period_requested == 1))
- r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
- else
+ if(relative_period_requested)
r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
+ else
+ r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
// find how many dimensions we have
long dimensions_count = r->d;
@@ -1466,48 +1954,26 @@ static RRDR *rrd2rrdr_variablestep(
// initialize RRDR
r->group = group;
- r->update_every = (int)group * update_every;
+ r->update_every = (int)(group * query_granularity);
r->before = before_wanted;
r->after = after_wanted;
r->internal.points_wanted = points_wanted;
r->internal.resampling_group = resampling_group;
r->internal.resampling_divisor = resampling_divisor;
-
+ r->internal.query_options = options;
+ r->internal.query_tier = tier;
// -------------------------------------------------------------------------
// assign the processor functions
-
- {
- int i, found = 0;
- for(i = 0; !found && api_v1_data_groups[i].name ;i++) {
- if(api_v1_data_groups[i].value == group_method) {
- r->internal.grouping_create= api_v1_data_groups[i].create;
- r->internal.grouping_reset = api_v1_data_groups[i].reset;
- r->internal.grouping_free = api_v1_data_groups[i].free;
- r->internal.grouping_add = api_v1_data_groups[i].add;
- r->internal.grouping_flush = api_v1_data_groups[i].flush;
- found = 1;
- }
- }
- if(!found) {
- errno = 0;
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: grouping method %u not found for chart '%s'. Using 'average'", (unsigned int)group_method, r->st->name);
- #endif
- r->internal.grouping_create= grouping_create_average;
- r->internal.grouping_reset = grouping_reset_average;
- r->internal.grouping_free = grouping_free_average;
- r->internal.grouping_add = grouping_add_average;
- r->internal.grouping_flush = grouping_flush_average;
- }
- }
+ rrdr_set_grouping_function(r, group_method);
// allocate any memory required by the grouping method
- r->internal.grouping_create(r);
+ r->internal.grouping_create(r, group_options);
// -------------------------------------------------------------------------
// disable the not-wanted dimensions
+
if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
rrdset_check_rdlock(st);
@@ -1515,19 +1981,22 @@ static RRDR *rrd2rrdr_variablestep(
rrdr_disable_not_selected_dimensions(r, options, dimensions, context_param_list);
+ query_debug_log_fin();
+
// -------------------------------------------------------------------------
// do the work for each dimension
time_t max_after = 0, min_before = 0;
long max_rows = 0;
+ RRDDIM *first_rd = context_param_list ? context_param_list->rd : st->dimensions;
RRDDIM *rd;
long c, dimensions_used = 0, dimensions_nonzero = 0;
struct timeval query_start_time;
struct timeval query_current_time;
- if (timeout)
- now_realtime_timeval(&query_start_time);
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ if (timeout) now_realtime_timeval(&query_start_time);
+
+ for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
// if we need a percentage, we need to calculate all dimensions
if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
@@ -1539,15 +2008,7 @@ static RRDR *rrd2rrdr_variablestep(
// reset the grouping for the new dimension
r->internal.grouping_reset(r);
- do_dimension_variablestep(
- r
- , points_wanted
- , rd
- , c
- , after_wanted
- , before_wanted
- , options
- );
+ rrd2rrdr_do_dimension(r, points_wanted, rd, c, after_wanted, before_wanted);
if (timeout)
now_realtime_timeval(&query_current_time);
@@ -1562,66 +2023,81 @@ static RRDR *rrd2rrdr_variablestep(
}
else {
if(r->after != max_after) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_after, rd->name, (size_t)r->after);
- #endif
+ internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_after, rd->name, (size_t)r->after);
+
r->after = (r->after > max_after) ? r->after : max_after;
}
if(r->before != min_before) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)min_before, rd->name, (size_t)r->before);
- #endif
+ internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)min_before, rd->name, (size_t)r->before);
+
r->before = (r->before < min_before) ? r->before : min_before;
}
if(r->rows != max_rows) {
- #ifdef NETDATA_INTERNAL_CHECKS
- error("INTERNAL ERROR: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
- #endif
+ internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
+ st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
+
r->rows = (r->rows > max_rows) ? r->rows : max_rows;
}
}
dimensions_used++;
- if (timeout && (dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
+ if (timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %d ms)",
- dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
+ (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
r->result_options |= RRDR_RESULT_OPTION_CANCEL;
break;
}
}
- #ifdef NETDATA_INTERNAL_CHECKS
-
+#ifdef NETDATA_INTERNAL_CHECKS
if (dimensions_used) {
if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ r->internal.log);
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ r->internal.log);
if(r->rows != points_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'points' is not wanted 'points'");
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "got 'points' is not wanted 'points'");
- if(aligned && (r->before % group) != 0)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "'before' is not aligned but alignment is required");
+ if(aligned && (r->before % (group * query_granularity)) != 0)
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+                                                   after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "'before' is not aligned but alignment is required");
// 'after' should not be aligned, since we start inside the first group
//if(aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+ // rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
- if(r->before != before_requested)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "chart is not aligned to requested 'before'");
+ if(r->before != before_wanted)
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "chart is not aligned to requested 'before'");
if(r->before != before_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'before' is not wanted 'before'");
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "got 'before' is not wanted 'before'");
// reported 'after' varies, depending on group
if(r->after != after_wanted)
- rrd2rrdr_log_request_response_metadata(r, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, /*after_slot, before_slot,*/ "got 'after' is not wanted 'after'");
+ rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
+ after_wanted, after_requested, before_wanted, before_requested,
+ points_requested, points_wanted, /*after_slot, before_slot,*/
+ "got 'after' is not wanted 'after'");
+
}
- #endif
+#endif
// free all resources used by the grouping method
r->internal.grouping_free(r);
@@ -1630,99 +2106,12 @@ static RRDR *rrd2rrdr_variablestep(
if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
// all the dimensions are zero
// mark them as NONZERO to send them all
- for(rd = temp_rd?temp_rd:st->dimensions, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
r->od[c] |= RRDR_DIMENSION_NONZERO;
}
}
rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
- freez(region_info_array);
return r;
}
-#endif //#ifdef ENABLE_DBENGINE
-
-RRDR *rrd2rrdr(
- ONEWAYALLOC *owa
- , RRDSET *st
- , long points_requested
- , long long after_requested
- , long long before_requested
- , RRDR_GROUPING group_method
- , long resampling_time_requested
- , RRDR_OPTIONS options
- , const char *dimensions
- , struct context_param *context_param_list
- , int timeout
-)
-{
- int rrd_update_every;
- int absolute_period_requested;
-
- time_t first_entry_t;
- time_t last_entry_t;
- if (context_param_list) {
- first_entry_t = context_param_list->first_entry_t;
- last_entry_t = context_param_list->last_entry_t;
- } else {
- rrdset_rdlock(st);
- first_entry_t = rrdset_first_entry_t_nolock(st);
- last_entry_t = rrdset_last_entry_t_nolock(st);
- rrdset_unlock(st);
- }
-
- rrd_update_every = st->update_every;
- absolute_period_requested = rrdr_convert_before_after_to_absolute(&after_requested, &before_requested,
- rrd_update_every, first_entry_t,
- last_entry_t, options);
- if (options & RRDR_OPTION_ALLOW_PAST)
- if (first_entry_t > after_requested)
- first_entry_t = after_requested;
-
- if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
- rebuild_context_param_list(owa, context_param_list, after_requested);
- st = context_param_list->rd ? context_param_list->rd->rrdset : NULL;
- if (unlikely(!st))
- return NULL;
- }
-
-#ifdef ENABLE_DBENGINE
- if (st->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
- struct rrdeng_region_info *region_info_array;
- unsigned regions, max_interval;
-
- /* This call takes the chart read-lock */
- regions = rrdeng_variable_step_boundaries(st, after_requested, before_requested,
- &region_info_array, &max_interval, context_param_list);
- if (1 == regions) {
- if (region_info_array) {
- if (rrd_update_every != region_info_array[0].update_every) {
- rrd_update_every = region_info_array[0].update_every;
- /* recalculate query alignment */
- absolute_period_requested =
- rrdr_convert_before_after_to_absolute(&after_requested, &before_requested, rrd_update_every,
- first_entry_t, last_entry_t, options);
- }
- freez(region_info_array);
- }
- return rrd2rrdr_fixedstep(owa, st, points_requested, after_requested, before_requested, group_method,
- resampling_time_requested, options, dimensions, rrd_update_every,
- first_entry_t, last_entry_t, absolute_period_requested, context_param_list, timeout);
- } else {
- if (rrd_update_every != (uint16_t)max_interval) {
- rrd_update_every = (uint16_t) max_interval;
- /* recalculate query alignment */
- absolute_period_requested = rrdr_convert_before_after_to_absolute(&after_requested, &before_requested,
- rrd_update_every, first_entry_t,
- last_entry_t, options);
- }
- return rrd2rrdr_variablestep(owa, st, points_requested, after_requested, before_requested, group_method,
- resampling_time_requested, options, dimensions, rrd_update_every,
- first_entry_t, last_entry_t, absolute_period_requested, region_info_array, context_param_list, timeout);
- }
- }
-#endif
- return rrd2rrdr_fixedstep(owa, st, points_requested, after_requested, before_requested, group_method,
- resampling_time_requested, options, dimensions,
- rrd_update_every, first_entry_t, last_entry_t, absolute_period_requested, context_param_list, timeout);
-}
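
For context, the new code path calls rrdr_relative_window_to_absolute() to turn relative windows (for example, after = -600 meaning "the last 600 seconds") into absolute timestamps before any alignment happens. The real function is not part of this diff and has more rules; the sketch below is a simplified illustration of that common case only, with relative_to_absolute() being an invented name.

    // Simplified sketch of converting a relative time window to absolute
    // timestamps, in the spirit of rrdr_relative_window_to_absolute().
    // Only the common case is covered: non-positive values are offsets
    // (before relative to now, after relative to before).
    #include <stdio.h>
    #include <time.h>

    static void relative_to_absolute(long long *after, long long *before) {
        time_t now = time(NULL);

        if(*before <= 0) *before = now + *before;    // offset from now
        if(*after  <= 0) *after  = *before + *after; // offset from 'before'

        if(*after > *before) {                       // keep the window sane
            long long t = *after; *after = *before; *before = t;
        }
    }

    int main(void) {
        long long after = -600, before = 0;
        relative_to_absolute(&after, &before);
        printf("window: %lld .. %lld (%lld seconds)\n",
               after, before, before - after);
        return 0;
    }
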
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
index 6b8a51c58..df876d9ac 100644
--- a/web/api/queries/query.h
+++ b/web/api/queries/query.h
@@ -3,6 +3,10 @@
#ifndef NETDATA_API_DATA_QUERY_H
#define NETDATA_API_DATA_QUERY_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
typedef enum rrdr_grouping {
RRDR_GROUPING_UNDEFINED = 0,
RRDR_GROUPING_AVERAGE,
@@ -10,15 +14,46 @@ typedef enum rrdr_grouping {
RRDR_GROUPING_MAX,
RRDR_GROUPING_SUM,
RRDR_GROUPING_INCREMENTAL_SUM,
+ RRDR_GROUPING_TRIMMED_MEAN1,
+ RRDR_GROUPING_TRIMMED_MEAN2,
+ RRDR_GROUPING_TRIMMED_MEAN3,
+ RRDR_GROUPING_TRIMMED_MEAN5,
+ RRDR_GROUPING_TRIMMED_MEAN10,
+ RRDR_GROUPING_TRIMMED_MEAN15,
+ RRDR_GROUPING_TRIMMED_MEAN20,
+ RRDR_GROUPING_TRIMMED_MEAN25,
RRDR_GROUPING_MEDIAN,
+ RRDR_GROUPING_TRIMMED_MEDIAN1,
+ RRDR_GROUPING_TRIMMED_MEDIAN2,
+ RRDR_GROUPING_TRIMMED_MEDIAN3,
+ RRDR_GROUPING_TRIMMED_MEDIAN5,
+ RRDR_GROUPING_TRIMMED_MEDIAN10,
+ RRDR_GROUPING_TRIMMED_MEDIAN15,
+ RRDR_GROUPING_TRIMMED_MEDIAN20,
+ RRDR_GROUPING_TRIMMED_MEDIAN25,
+ RRDR_GROUPING_PERCENTILE25,
+ RRDR_GROUPING_PERCENTILE50,
+ RRDR_GROUPING_PERCENTILE75,
+ RRDR_GROUPING_PERCENTILE80,
+ RRDR_GROUPING_PERCENTILE90,
+ RRDR_GROUPING_PERCENTILE95,
+ RRDR_GROUPING_PERCENTILE97,
+ RRDR_GROUPING_PERCENTILE98,
+ RRDR_GROUPING_PERCENTILE99,
RRDR_GROUPING_STDDEV,
RRDR_GROUPING_CV,
RRDR_GROUPING_SES,
RRDR_GROUPING_DES,
+ RRDR_GROUPING_COUNTIF,
} RRDR_GROUPING;
extern const char *group_method2string(RRDR_GROUPING group);
extern void web_client_api_v1_init_grouping(void);
extern RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);
+extern const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group);
+
+#ifdef __cplusplus
+}
+#endif
#endif //NETDATA_API_DATA_QUERY_H
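
The enum above gains the trimmed-mean, trimmed-median, percentile and countif methods; user-facing names are resolved to these values by web_client_api_request_v1_data_group(), which is not part of this diff. The table below is purely illustrative of that kind of lookup; the names and their spelling are assumptions, not the actual API strings.

    // Illustrative only: a minimal name-to-enum lookup for a few of the new
    // grouping methods. The real mapping lives in
    // web_client_api_request_v1_data_group() and may use different names.
    #include <string.h>
    #include "query.h"   // path assumed; provides RRDR_GROUPING

    struct group_name { const char *name; RRDR_GROUPING value; };

    static const struct group_name example_groups[] = {
        { "trimmed-mean5",   RRDR_GROUPING_TRIMMED_MEAN5   },
        { "trimmed-median5", RRDR_GROUPING_TRIMMED_MEDIAN5 },
        { "percentile95",    RRDR_GROUPING_PERCENTILE95    },
        { "countif",         RRDR_GROUPING_COUNTIF         },
        { NULL,              RRDR_GROUPING_UNDEFINED       },
    };

    static RRDR_GROUPING example_group_lookup(const char *name, RRDR_GROUPING def) {
        for(int i = 0; example_groups[i].name; i++)
            if(strcmp(example_groups[i].name, name) == 0)
                return example_groups[i].value;
        return def;   // fall back to the caller's default, e.g. average
    }
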
diff --git a/web/api/queries/rrdr.c b/web/api/queries/rrdr.c
index 4d05778c1..ecf4ca2ac 100644
--- a/web/api/queries/rrdr.c
+++ b/web/api/queries/rrdr.c
@@ -30,7 +30,7 @@ static void rrdr_dump(RRDR *r)
// for each line in the array
for(i = 0; i < r->rows ;i++) {
- calculated_number *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_DIMENSION_FLAGS *co = &r->o[ i * r->d ];
// print the id and the timestamp of the line
@@ -44,7 +44,7 @@ static void rrdr_dump(RRDR *r)
if(co[c] & RRDR_EMPTY)
fprintf(stderr, "null ");
else
- fprintf(stderr, CALCULATED_NUMBER_FORMAT " %s%s%s%s "
+ fprintf(stderr, NETDATA_DOUBLE_FORMAT " %s%s%s%s "
, cn[c]
, (co[c] & RRDR_EMPTY)?"E":" "
, (co[c] & RRDR_RESET)?"R":" "
@@ -58,78 +58,65 @@ static void rrdr_dump(RRDR *r)
}
*/
+inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r) {
+ if(unlikely(!r)) return;
-
-
-inline static void rrdr_lock_rrdset(RRDR *r) {
- if(unlikely(!r)) {
- error("NULL value given!");
- return;
- }
-
- rrdset_rdlock(r->st);
- r->has_st_lock = 1;
-}
-
-inline static void rrdr_unlock_rrdset(RRDR *r) {
- if(unlikely(!r)) {
- error("NULL value given!");
- return;
- }
-
- if(likely(r->has_st_lock)) {
- r->has_st_lock = 0;
+ if(likely(r->st_locked_by_rrdr_create))
rrdset_unlock(r->st);
- }
-}
-
-inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r)
-{
- if(unlikely(!r)) {
- error("NULL value given!");
- return;
- }
- rrdr_unlock_rrdset(r);
onewayalloc_freez(owa, r->t);
onewayalloc_freez(owa, r->v);
onewayalloc_freez(owa, r->o);
onewayalloc_freez(owa, r->od);
+ onewayalloc_freez(owa, r->ar);
onewayalloc_freez(owa, r);
}
-RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list)
-{
- if (unlikely(!st)) {
- error("NULL value given!");
- return NULL;
- }
-
+RRDR *rrdr_create_for_x_dimensions(ONEWAYALLOC *owa, int dimensions, long points) {
RRDR *r = onewayalloc_callocz(owa, 1, sizeof(RRDR));
- r->st = st;
+ r->internal.owa = owa;
+
+ r->d = dimensions;
+ r->n = points;
+
+ r->t = onewayalloc_callocz(owa, points, sizeof(time_t));
+ r->v = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ r->o = onewayalloc_mallocz(owa, points * dimensions * sizeof(RRDR_VALUE_FLAGS));
+ r->ar = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
+ r->od = onewayalloc_mallocz(owa, dimensions * sizeof(RRDR_DIMENSION_FLAGS));
+ r->group = 1;
+ r->update_every = 1;
+
+ return r;
+}
+
+RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list) {
+ if (unlikely(!st)) return NULL;
+
+ bool st_locked_by_rrdr_create = false;
if (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
- rrdr_lock_rrdset(r);
- r->st_needs_lock = 1;
+ rrdset_rdlock(st);
+ st_locked_by_rrdr_create = true;
}
+ // count the number of dimensions
+ int dimensions = 0;
RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
RRDDIM *rd;
if (temp_rd) {
RRDDIM *t = temp_rd;
while (t) {
- r->d++;
+ dimensions++;
t = t->next;
}
} else
- rrddim_foreach_read(rd, st) r->d++;
-
- r->n = n;
+ rrddim_foreach_read(rd, st) dimensions++;
- r->t = onewayalloc_callocz(owa, (size_t)n, sizeof(time_t));
- r->v = onewayalloc_mallocz(owa, n * r->d * sizeof(calculated_number));
- r->o = onewayalloc_mallocz(owa, n * r->d * sizeof(RRDR_VALUE_FLAGS));
- r->od = onewayalloc_mallocz(owa, r->d * sizeof(RRDR_DIMENSION_FLAGS));
+ // create the rrdr
+ RRDR *r = rrdr_create_for_x_dimensions(owa, dimensions, n);
+ r->st = st;
+ r->st_locked_by_rrdr_create = st_locked_by_rrdr_create;
// set the hidden flag on hidden dimensions
int c;
@@ -140,8 +127,5 @@ RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_pa
r->od[c] = RRDR_DIMENSION_DEFAULT;
}
- r->group = 1;
- r->update_every = 1;
-
return r;
}
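
The allocations in rrdr_create_for_x_dimensions() above define the RRDR memory layout: one timestamp per row, and row-major n-by-d arrays for values, per-value flags and the new anomaly-rate buffer. The sketch below shows how a single cell is addressed; print_cell() is an invented helper for illustration and assumes the types from rrdr.h are available.

    // How the RRDR buffers created by rrdr_create_for_x_dimensions() are laid
    // out: per-value arrays are row-major, n points by d dimensions.
    // print_cell() is illustrative, not an existing helper.
    #include <stdio.h>
    #include "rrdr.h"   // path assumed; provides RRDR, NETDATA_DOUBLE, RRDR_VALUE_FLAGS

    static void print_cell(RRDR *r, long point, long dim) {
        time_t           when  = r->t[point];               // one timestamp per row
        NETDATA_DOUBLE   value = r->v[point * r->d + dim];  // value of this dimension
        NETDATA_DOUBLE   ar    = r->ar[point * r->d + dim]; // its anomaly rate (0 - 100)
        RRDR_VALUE_FLAGS flags = r->o[point * r->d + dim];  // per-value options

        printf("t=%ld dim=%ld value=%f ar=%f empty=%s\n",
               (long)when, dim, (double)value, (double)ar,
               (flags & RRDR_VALUE_EMPTY) ? "yes" : "no");
    }
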
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
index 87ba6c86b..1c80e103f 100644
--- a/web/api/queries/rrdr.h
+++ b/web/api/queries/rrdr.h
@@ -4,27 +4,46 @@
#define NETDATA_QUERIES_RRDR_H
#include "libnetdata/libnetdata.h"
+#include "web/api/queries/query.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum tier_query_fetch {
+ TIER_QUERY_FETCH_SUM,
+ TIER_QUERY_FETCH_MIN,
+ TIER_QUERY_FETCH_MAX,
+ TIER_QUERY_FETCH_AVERAGE
+} TIER_QUERY_FETCH;
typedef enum rrdr_options {
- RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
- RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
- RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
- RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
- RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
- RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
- RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
- RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
- RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
- RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
- RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
- RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
- RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
- RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
- RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
- RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
- RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping response in a JSON, return custom variables in response
- RRDR_OPTION_ALLOW_PAST = 0x00020000, // The after parameter can extend in the past before the first entry
- RRDR_OPTION_ANOMALY_BIT = 0x00040000, // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
+ RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
+ RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
+ RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
+ RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
+ RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
+ RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
+ RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
+ RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
+ RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
+ RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
+ RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
+ RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
+ RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
+ RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
+ RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
+ RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping response in a JSON, return custom variables in response
+ RRDR_OPTION_NATURAL_POINTS = 0x00020000, // return the natural points of the database
+ RRDR_OPTION_VIRTUAL_POINTS = 0x00040000, // return virtual points
+ RRDR_OPTION_ANOMALY_BIT = 0x00080000, // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_RETURN_RAW = 0x00100000, // Return raw data for aggregating across multiple nodes
+ RRDR_OPTION_RETURN_JWAR = 0x00200000, // Return anomaly rates in jsonwrap
+ RRDR_OPTION_SELECTED_TIER = 0x00400000, // Use the selected tier for the query
+
+ // internal ones - not to be exposed to the API
+    RRDR_OPTION_INTERNAL_AR        = 0x10000000, // internal use only, to let the formatters know we want to render the anomaly rate
} RRDR_OPTIONS;
typedef enum rrdr_value_flag {
@@ -62,40 +81,46 @@ typedef struct rrdresult {
RRDR_DIMENSION_FLAGS *od; // the options for the dimensions
time_t *t; // array of n timestamps
- calculated_number *v; // array n x d values
+ NETDATA_DOUBLE *v; // array n x d values
RRDR_VALUE_FLAGS *o; // array n x d options for each value returned
+ NETDATA_DOUBLE *ar; // array n x d of anomaly rates (0 - 100)
long group; // how many collected values were grouped for each row
int update_every; // what is the suggested update frequency in seconds
- calculated_number min;
- calculated_number max;
+ NETDATA_DOUBLE min;
+ NETDATA_DOUBLE max;
time_t before;
time_t after;
- int has_st_lock; // if st is read locked by us
- uint8_t st_needs_lock; // if ST should be locked
+ bool st_locked_by_rrdr_create; // if st is read locked by us
// internal rrd2rrdr() members below this point
struct {
+ int query_tier; // the selected tier
+ RRDR_OPTIONS query_options; // RRDR_OPTION_* (as run by the query)
+
long points_wanted;
long resampling_group;
- calculated_number resampling_divisor;
+ NETDATA_DOUBLE resampling_divisor;
- void (*grouping_create)(struct rrdresult *r);
+ void (*grouping_create)(struct rrdresult *r, const char *options);
void (*grouping_reset)(struct rrdresult *r);
void (*grouping_free)(struct rrdresult *r);
- void (*grouping_add)(struct rrdresult *r, calculated_number value);
- calculated_number (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+ void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
+ NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
void *grouping_data;
+ TIER_QUERY_FETCH tier_query_fetch;
#ifdef NETDATA_INTERNAL_CHECKS
const char *log;
#endif
size_t db_points_read;
size_t result_points_generated;
+ size_t tier_points_read[RRD_STORAGE_TIERS];
+ ONEWAYALLOC *owa;
} internal;
} RRDR;
@@ -104,16 +129,21 @@ typedef struct rrdresult {
#include "database/rrd.h"
extern void rrdr_free(ONEWAYALLOC *owa, RRDR *r);
extern RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list);
+extern RRDR *rrdr_create_for_x_dimensions(ONEWAYALLOC *owa, int dimensions, long points);
#include "../web_api_v1.h"
#include "web/api/queries/query.h"
extern RRDR *rrd2rrdr(
ONEWAYALLOC *owa,
- RRDSET *st, long points_requested, long long after_requested, long long before_requested,
+ RRDSET *st, long points_wanted, long long after_wanted, long long before_wanted,
RRDR_GROUPING group_method, long resampling_time_requested, RRDR_OPTIONS options, const char *dimensions,
- struct context_param *context_param_list, int timeout);
+ struct context_param *context_param_list, const char *group_options, int timeout, int tier);
+
+extern int rrdr_relative_window_to_absolute(long long *after, long long *before);
-#include "query.h"
+#ifdef __cplusplus
+}
+#endif
#endif //NETDATA_QUERIES_RRDR_H
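
RRDR_OPTIONS is a plain bitmask, so callers combine flags with OR and the query engine may clear conflicting ones. The sketch below condenses the natural/virtual points normalization from the query.c hunk earlier in this patch; normalize_points_options() is not a real function, just a compact restatement of that logic.

    // Sketch of the natural/virtual points normalization performed in
    // rrd2rrdr(), simplified from the diff above. Assumes rrdr.h is available.
    #include "rrdr.h"   // path assumed; provides RRDR_OPTIONS and the flag values

    static RRDR_OPTIONS normalize_points_options(RRDR_OPTIONS options, int natural_points) {
        if(options & RRDR_OPTION_VIRTUAL_POINTS)
            natural_points = 0;                      // an explicit request wins

        if(natural_points) {
            options |=  RRDR_OPTION_NATURAL_POINTS;
            options &= ~RRDR_OPTION_VIRTUAL_POINTS;  // the two are mutually exclusive
        }
        else {
            options |=  RRDR_OPTION_VIRTUAL_POINTS;
            options &= ~RRDR_OPTION_NATURAL_POINTS;
        }
        return options;
    }
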
diff --git a/web/api/queries/ses/ses.c b/web/api/queries/ses/ses.c
index ae4a0fa0d..5e94002c3 100644
--- a/web/api/queries/ses/ses.c
+++ b/web/api/queries/ses/ses.c
@@ -7,9 +7,9 @@
// single exponential smoothing
struct grouping_ses {
- calculated_number alpha;
- calculated_number alpha_other;
- calculated_number level;
+ NETDATA_DOUBLE alpha;
+ NETDATA_DOUBLE alpha_other;
+ NETDATA_DOUBLE level;
size_t count;
};
@@ -25,20 +25,20 @@ void grouping_init_ses(void) {
}
}
-static inline calculated_number window(RRDR *r, struct grouping_ses *g) {
+static inline NETDATA_DOUBLE window(RRDR *r, struct grouping_ses *g) {
(void)g;
- calculated_number points;
+ NETDATA_DOUBLE points;
if(r->group == 1) {
// provide a running DES
- points = r->internal.points_wanted;
+ points = (NETDATA_DOUBLE)r->internal.points_wanted;
}
else {
// provide a SES with flush points
- points = r->group;
+ points = (NETDATA_DOUBLE)r->group;
}
- return (points > max_window_size) ? max_window_size : points;
+ return (points > (NETDATA_DOUBLE)max_window_size) ? (NETDATA_DOUBLE)max_window_size : points;
}
static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
@@ -48,8 +48,8 @@ static inline void set_alpha(RRDR *r, struct grouping_ses *g) {
g->alpha_other = 1.0 - g->alpha;
}
-void grouping_create_ses(RRDR *r) {
- struct grouping_ses *g = (struct grouping_ses *)callocz(1, sizeof(struct grouping_ses));
+void grouping_create_ses(RRDR *r, const char *options __maybe_unused) {
+ struct grouping_ses *g = (struct grouping_ses *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_ses));
set_alpha(r, g);
g->level = 0.0;
r->internal.grouping_data = g;
@@ -64,11 +64,11 @@ void grouping_reset_ses(RRDR *r) {
}
void grouping_free_ses(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_ses(RRDR *r, calculated_number value) {
+void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
if(unlikely(!g->count))
@@ -78,10 +78,10 @@ void grouping_add_ses(RRDR *r, calculated_number value) {
g->count++;
}
-calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_ses *g = (struct grouping_ses *)r->internal.grouping_data;
- if(unlikely(!g->count || !calculated_number_isnumber(g->level))) {
+ if(unlikely(!g->count || !netdata_double_isnumber(g->level))) {
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
return 0.0;
}
diff --git a/web/api/queries/ses/ses.h b/web/api/queries/ses/ses.h
index c05f208f3..094b8de3f 100644
--- a/web/api/queries/ses/ses.h
+++ b/web/api/queries/ses/ses.h
@@ -8,10 +8,10 @@
extern void grouping_init_ses(void);
-extern void grouping_create_ses(RRDR *r);
+extern void grouping_create_ses(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_ses(RRDR *r);
extern void grouping_free_ses(RRDR *r);
-extern void grouping_add_ses(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_SES_H
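
The ses grouping keeps a single smoothed level, updated once per added value. The standalone sketch below shows the usual single-exponential-smoothing recurrence behind grouping_add_ses(); the smoothing factor is a free parameter here, whereas the real code derives it from the window size in set_alpha(), and seeding with the first sample is an assumption based on the count check visible in the hunk.

    // Minimal sketch of single exponential smoothing: the recurrence behind
    // grouping_add_ses(). alpha is illustrative; the real alpha comes from
    // set_alpha() based on the window size.
    #include <stdio.h>

    int main(void) {
        double alpha = 0.3, level = 0.0;
        double samples[] = { 10, 12, 11, 30, 13 };
        int n = (int)(sizeof(samples) / sizeof(samples[0]));

        for(int i = 0; i < n; i++) {
            if(i == 0) level = samples[0];   // seed with the first value (assumed, per the !count check)
            else       level = alpha * samples[i] + (1.0 - alpha) * level;
            printf("value=%.1f level=%.3f\n", samples[i], level);
        }
        return 0;
    }
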
diff --git a/web/api/queries/stddev/stddev.c b/web/api/queries/stddev/stddev.c
index ffe7a47c0..92a67b42d 100644
--- a/web/api/queries/stddev/stddev.c
+++ b/web/api/queries/stddev/stddev.c
@@ -11,11 +11,11 @@
struct grouping_stddev {
long count;
- calculated_number m_oldM, m_newM, m_oldS, m_newS;
+ NETDATA_DOUBLE m_oldM, m_newM, m_oldS, m_newS;
};
-void grouping_create_stddev(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_stddev));
+void grouping_create_stddev(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_stddev));
}
// resets when switches dimensions
@@ -26,11 +26,11 @@ void grouping_reset_stddev(RRDR *r) {
}
void grouping_free_stddev(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_stddev(RRDR *r, calculated_number value) {
+void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
g->count++;
@@ -50,26 +50,26 @@ void grouping_add_stddev(RRDR *r, calculated_number value) {
}
}
-static inline calculated_number mean(struct grouping_stddev *g) {
+static inline NETDATA_DOUBLE mean(struct grouping_stddev *g) {
return (g->count > 0) ? g->m_newM : 0.0;
}
-static inline calculated_number variance(struct grouping_stddev *g) {
- return ( (g->count > 1) ? g->m_newS/(g->count - 1) : 0.0 );
+static inline NETDATA_DOUBLE variance(struct grouping_stddev *g) {
+ return ( (g->count > 1) ? g->m_newS/(NETDATA_DOUBLE)(g->count - 1) : 0.0 );
}
-static inline calculated_number stddev(struct grouping_stddev *g) {
- return sqrtl(variance(g));
+static inline NETDATA_DOUBLE stddev(struct grouping_stddev *g) {
+ return sqrtndd(variance(g));
}
-calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(likely(g->count > 1)) {
value = stddev(g);
- if(!calculated_number_isnumber(value)) {
+ if(!netdata_double_isnumber(value)) {
value = 0.0;
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
@@ -88,16 +88,16 @@ calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_op
}
// https://en.wikipedia.org/wiki/Coefficient_of_variation
-calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(likely(g->count > 1)) {
- calculated_number m = mean(g);
+ NETDATA_DOUBLE m = mean(g);
value = 100.0 * stddev(g) / ((m < 0)? -m : m);
- if(unlikely(!calculated_number_isnumber(value))) {
+ if(unlikely(!netdata_double_isnumber(value))) {
value = 0.0;
*rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
}
@@ -121,10 +121,10 @@ calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FL
/*
* Mean = average
*
-calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
@@ -148,10 +148,10 @@ calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_opti
/*
* It is not advised to use this version of variance directly
*
-calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_stddev *g = (struct grouping_stddev *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/stddev/stddev.h b/web/api/queries/stddev/stddev.h
index ab58fbe50..c5c91f88d 100644
--- a/web/api/queries/stddev/stddev.h
+++ b/web/api/queries/stddev/stddev.h
@@ -6,13 +6,13 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_stddev(RRDR *r);
+extern void grouping_create_stddev(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_stddev(RRDR *r);
extern void grouping_free_stddev(RRDR *r);
-extern void grouping_add_stddev(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-extern calculated_number grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// extern calculated_number grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// extern calculated_number grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// extern NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// extern NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_STDDEV_H
diff --git a/web/api/queries/sum/sum.c b/web/api/queries/sum/sum.c
index 6bb012bb0..eec6e2ad0 100644
--- a/web/api/queries/sum/sum.c
+++ b/web/api/queries/sum/sum.c
@@ -6,12 +6,12 @@
// sum
struct grouping_sum {
- calculated_number sum;
+ NETDATA_DOUBLE sum;
size_t count;
};
-void grouping_create_sum(RRDR *r) {
- r->internal.grouping_data = callocz(1, sizeof(struct grouping_sum));
+void grouping_create_sum(RRDR *r, const char *options __maybe_unused) {
+ r->internal.grouping_data = onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_sum));
}
// resets when switches dimensions
@@ -23,20 +23,20 @@ void grouping_reset_sum(RRDR *r) {
}
void grouping_free_sum(RRDR *r) {
- freez(r->internal.grouping_data);
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
r->internal.grouping_data = NULL;
}
-void grouping_add_sum(RRDR *r, calculated_number value) {
+void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value) {
struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
g->sum += value;
g->count++;
}
-calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
struct grouping_sum *g = (struct grouping_sum *)r->internal.grouping_data;
- calculated_number value;
+ NETDATA_DOUBLE value;
if(unlikely(!g->count)) {
value = 0.0;
diff --git a/web/api/queries/sum/sum.h b/web/api/queries/sum/sum.h
index 05cb6185e..4e7e396e9 100644
--- a/web/api/queries/sum/sum.h
+++ b/web/api/queries/sum/sum.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_sum(RRDR *r);
+extern void grouping_create_sum(RRDR *r, const char *options __maybe_unused);
extern void grouping_reset_sum(RRDR *r);
extern void grouping_free_sum(RRDR *r);
-extern void grouping_add_sum(RRDR *r, calculated_number value);
-extern calculated_number grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+extern void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_SUM_H
diff --git a/web/api/queries/trimmed_mean/Makefile.am b/web/api/queries/trimmed_mean/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/web/api/queries/trimmed_mean/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/web/api/queries/trimmed_mean/README.md b/web/api/queries/trimmed_mean/README.md
new file mode 100644
index 000000000..71cdb85db
--- /dev/null
+++ b/web/api/queries/trimmed_mean/README.md
@@ -0,0 +1,56 @@
+<!--
+title: "Trimmed Mean"
+description: "Use trimmed-mean in API queries and health entities to find the average value from a sample, eliminating any unwanted spikes in the returned metrics."
+custom_edit_url: https://github.com/netdata/netdata/edit/master/web/api/queries/trimmed_mean/README.md
+-->
+
+# Trimmed Mean
+
+The trimmed mean is the average of a series, after excluding a given percentage of its smallest and biggest points.
+
+If the percentage requested to be excluded does not correspond to a whole number of points, Netdata applies
+linear interpolation on the last point.
+
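+For example, `trimmed-mean5` on a window of 25 points would need to exclude 1.25
+points from each end. As a simplified sketch of the slot arithmetic in
+`trimmed_mean.c` below: the implementation keeps the middle 22 points and counts
+one extra point that is a 50/50 blend of the last kept point and the first
+excluded one.
+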
+The following trimmed mean aliases are defined:
+
+- `trimmed-mean1`
+- `trimmed-mean2`
+- `trimmed-mean3`
+- `trimmed-mean5`
+- `trimmed-mean10`
+- `trimmed-mean15`
+- `trimmed-mean20`
+- `trimmed-mean25`
+
+The default `trimmed-mean` is an alias for `trimmed-mean5`.
+Any percentage may be requested using the `group_options` query parameter.
+
+## How to use
+
+Use it in alarms like this:
+
+```
+ alarm: my_alarm
+ on: my_chart
+lookup: trimmed-mean5 -1m unaligned of my_dimension
+ warn: $this > 1000
+```
+
+`trimmed-mean` does not change the units. For example, if the chart's units are `requests/sec`, the result
+is expressed in the same units.
+
+It can also be used in APIs and badges as `&group=trimmed-mean` in the URL, and the additional parameter
+`group_options` may be used to request any percentage (e.g. `&group=trimmed-mean&group_options=29`).
+
+## Examples
+
+Examining the last 1 minute of `successful` web server responses:
+
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=min&after=-60&label=min)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=average&after=-60&label=average)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=trimmed-mean5&after=-60&label=trimmed-mean5&value_color=orange)
+- ![](https://registry.my-netdata.io/api/v1/badge.svg?chart=web_log_nginx.response_statuses&options=unaligned&dimensions=success&group=max&after=-60&label=max)
+
+## References
+
+- <https://en.wikipedia.org/wiki/Truncated_mean>.
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.c b/web/api/queries/trimmed_mean/trimmed_mean.c
new file mode 100644
index 000000000..2277208a7
--- /dev/null
+++ b/web/api/queries/trimmed_mean/trimmed_mean.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "trimmed_mean.h"
+
+// ----------------------------------------------------------------------------
+// trimmed mean
+
+struct grouping_trimmed_mean {
+ size_t series_size;
+ size_t next_pos;
+ NETDATA_DOUBLE percent;
+
+ NETDATA_DOUBLE *series;
+};
+
+static void grouping_create_trimmed_mean_internal(RRDR *r, const char *options, NETDATA_DOUBLE def) {
+ long entries = r->group;
+ if(entries < 10) entries = 10;
+
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)onewayalloc_callocz(r->internal.owa, 1, sizeof(struct grouping_trimmed_mean));
+ g->series = onewayalloc_mallocz(r->internal.owa, entries * sizeof(NETDATA_DOUBLE));
+ g->series_size = (size_t)entries;
+
+ g->percent = def;
+ if(options && *options) {
+ g->percent = str2ndd(options, NULL);
+ if(!netdata_double_isnumber(g->percent)) g->percent = 0.0;
+ if(g->percent < 0.0) g->percent = 0.0;
+ if(g->percent > 50.0) g->percent = 50.0;
+ }
+
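+ // convert the trim percentage to the fraction of points to keep,
+ // e.g. 5.0 (trim 5% from each end) becomes 0.9 (keep the middle 90%)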
+ g->percent = 1.0 - ((g->percent / 100.0) * 2.0);
+ r->internal.grouping_data = g;
+}
+
+void grouping_create_trimmed_mean1(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 1.0);
+}
+void grouping_create_trimmed_mean2(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 2.0);
+}
+void grouping_create_trimmed_mean3(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 3.0);
+}
+void grouping_create_trimmed_mean5(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 5.0);
+}
+void grouping_create_trimmed_mean10(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 10.0);
+}
+void grouping_create_trimmed_mean15(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 15.0);
+}
+void grouping_create_trimmed_mean20(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 20.0);
+}
+void grouping_create_trimmed_mean25(RRDR *r, const char *options) {
+ grouping_create_trimmed_mean_internal(r, options, 25.0);
+}
+
+// resets when switches dimensions
+// so, clear everything to restart
+void grouping_reset_trimmed_mean(RRDR *r) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+ g->next_pos = 0;
+}
+
+void grouping_free_trimmed_mean(RRDR *r) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+ if(g) onewayalloc_freez(r->internal.owa, g->series);
+
+ onewayalloc_freez(r->internal.owa, r->internal.grouping_data);
+ r->internal.grouping_data = NULL;
+}
+
+void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+
+ if(unlikely(g->next_pos >= g->series_size)) {
+ g->series = onewayalloc_doublesize( r->internal.owa, g->series, g->series_size * sizeof(NETDATA_DOUBLE));
+ g->series_size *= 2;
+ }
+
+ g->series[g->next_pos++] = value;
+}
+
+NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr) {
+ struct grouping_trimmed_mean *g = (struct grouping_trimmed_mean *)r->internal.grouping_data;
+
+ NETDATA_DOUBLE value;
+ size_t available_slots = g->next_pos;
+
+ if(unlikely(!available_slots)) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+ else if(available_slots == 1) {
+ value = g->series[0];
+ }
+ else {
+ sort_series(g->series, available_slots);
+
+ NETDATA_DOUBLE min = g->series[0];
+ NETDATA_DOUBLE max = g->series[available_slots - 1];
+
+ if (min != max) {
+ size_t slots_to_use = (size_t)((NETDATA_DOUBLE)available_slots * g->percent);
+ if(!slots_to_use) slots_to_use = 1;
+
+ NETDATA_DOUBLE percent_to_use = (NETDATA_DOUBLE)slots_to_use / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_delta = g->percent - percent_to_use;
+
+ NETDATA_DOUBLE percent_interpolation_slot = 0.0;
+ NETDATA_DOUBLE percent_last_slot = 0.0;
+ if(percent_delta > 0.0) {
+ NETDATA_DOUBLE percent_to_use_plus_1_slot = (NETDATA_DOUBLE)(slots_to_use + 1) / (NETDATA_DOUBLE)available_slots;
+ NETDATA_DOUBLE percent_1slot = percent_to_use_plus_1_slot - percent_to_use;
+
+ percent_interpolation_slot = percent_delta / percent_1slot;
+ percent_last_slot = 1 - percent_interpolation_slot;
+ }
+
+ int start_slot, stop_slot, step, last_slot, interpolation_slot;
+ if(min >= 0.0 && max >= 0.0) {
+ start_slot = (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot + (int)slots_to_use;
+ last_slot = stop_slot - 1;
+ interpolation_slot = stop_slot;
+ step = 1;
+ }
+ else {
+ start_slot = (int)available_slots - 1 - (int)((available_slots - slots_to_use) / 2);
+ stop_slot = start_slot - (int)slots_to_use;
+ last_slot = stop_slot + 1;
+ interpolation_slot = stop_slot;
+ step = -1;
+ }
+
+ value = 0.0;
+ for(int slot = start_slot; slot != stop_slot ; slot += step)
+ value += g->series[slot];
+
+ size_t counted = slots_to_use;
+ if(percent_interpolation_slot > 0.0 && interpolation_slot >= 0 && interpolation_slot < (int)available_slots) {
+ value += g->series[interpolation_slot] * percent_interpolation_slot;
+ value += g->series[last_slot] * percent_last_slot;
+ counted++;
+ }
+
+ value = value / (NETDATA_DOUBLE)counted;
+ }
+ else
+ value = min;
+ }
+
+ if(unlikely(!netdata_double_isnumber(value))) {
+ value = 0.0;
+ *rrdr_value_options_ptr |= RRDR_VALUE_EMPTY;
+ }
+
+ //log_series_to_stderr(g->series, g->next_pos, value, "trimmed_mean");
+
+ g->next_pos = 0;
+
+ return value;
+}
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.h b/web/api/queries/trimmed_mean/trimmed_mean.h
new file mode 100644
index 000000000..1a4f63e9c
--- /dev/null
+++ b/web/api/queries/trimmed_mean/trimmed_mean.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_QUERIES_TRIMMED_MEAN_H
+#define NETDATA_API_QUERIES_TRIMMED_MEAN_H
+
+#include "../query.h"
+#include "../rrdr.h"
+
+extern void grouping_create_trimmed_mean1(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean2(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean3(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean5(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean10(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean15(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean20(RRDR *r, const char *options);
+extern void grouping_create_trimmed_mean25(RRDR *r, const char *options);
+extern void grouping_reset_trimmed_mean(RRDR *r);
+extern void grouping_free_trimmed_mean(RRDR *r);
+extern void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value);
+extern NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+
+#endif //NETDATA_API_QUERIES_TRIMMED_MEAN_H
diff --git a/web/api/queries/weights.c b/web/api/queries/weights.c
new file mode 100644
index 000000000..97a00f91c
--- /dev/null
+++ b/web/api/queries/weights.c
@@ -0,0 +1,1220 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "daemon/common.h"
+#include "database/KolmogorovSmirnovDist.h"
+
+#define MAX_POINTS 10000
+int enable_metric_correlations = CONFIG_BOOLEAN_YES;
+int metric_correlations_version = 1;
+WEIGHTS_METHOD default_metric_correlations_method = WEIGHTS_METHOD_MC_KS2;
+
+typedef struct weights_stats {
+ NETDATA_DOUBLE max_base_high_ratio;
+ size_t db_points;
+ size_t result_points;
+ size_t db_queries;
+ size_t db_points_per_tier[RRD_STORAGE_TIERS];
+ size_t binary_searches;
+} WEIGHTS_STATS;
+
+// ----------------------------------------------------------------------------
+// parse and render metric correlations methods
+
+static struct {
+ const char *name;
+ WEIGHTS_METHOD value;
+} weights_methods[] = {
+ { "ks2" , WEIGHTS_METHOD_MC_KS2}
+ , { "volume" , WEIGHTS_METHOD_MC_VOLUME}
+ , { "anomaly-rate" , WEIGHTS_METHOD_ANOMALY_RATE}
+ , { NULL , 0 }
+};
+
+WEIGHTS_METHOD weights_string_to_method(const char *method) {
+ for(int i = 0; weights_methods[i].name ;i++)
+ if(strcmp(method, weights_methods[i].name) == 0)
+ return weights_methods[i].value;
+
+ return default_metric_correlations_method;
+}
+
+const char *weights_method_to_string(WEIGHTS_METHOD method) {
+ for(int i = 0; weights_methods[i].name ;i++)
+ if(weights_methods[i].value == method)
+ return weights_methods[i].name;
+
+ return "unknown";
+}
+
+// ----------------------------------------------------------------------------
+// The results per dimension are aggregated into a dictionary
+
+typedef enum {
+ RESULT_IS_BASE_HIGH_RATIO = (1 << 0),
+ RESULT_IS_PERCENTAGE_OF_TIME = (1 << 1),
+} RESULT_FLAGS;
+
+struct register_result {
+ RESULT_FLAGS flags;
+ RRDSET *st;
+ const char *chart_id;
+ const char *context;
+ const char *dim_name;
+ NETDATA_DOUBLE value;
+
+ struct register_result *next; // used to link contexts together
+};
+
+static void register_result_insert_callback(const char *name, void *value, void *data) {
+ (void)name;
+ (void)data;
+
+ struct register_result *t = (struct register_result *)value;
+
+ if(t->chart_id) t->chart_id = strdupz(t->chart_id);
+ if(t->context) t->context = strdupz(t->context);
+ if(t->dim_name) t->dim_name = strdupz(t->dim_name);
+}
+
+static void register_result_delete_callback(const char *name, void *value, void *data) {
+ (void)name;
+ (void)data;
+ struct register_result *t = (struct register_result *)value;
+
+ freez((void *)t->chart_id);
+ freez((void *)t->context);
+ freez((void *)t->dim_name);
+}
+
+static DICTIONARY *register_result_init() {
+ DICTIONARY *results = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ dictionary_register_insert_callback(results, register_result_insert_callback, results);
+ dictionary_register_delete_callback(results, register_result_delete_callback, results);
+ return results;
+}
+
+static void register_result_destroy(DICTIONARY *results) {
+ dictionary_destroy(results);
+}
+
+static void register_result(DICTIONARY *results,
+ RRDSET *st,
+ RRDDIM *d,
+ NETDATA_DOUBLE value,
+ RESULT_FLAGS flags,
+ WEIGHTS_STATS *stats,
+ bool register_zero) {
+
+ if(!netdata_double_isnumber(value)) return;
+
+ // make it positive
+ NETDATA_DOUBLE v = fabsndd(value);
+
+ // no need to store zero scored values
+ if(unlikely(fpclassify(v) == FP_ZERO && !register_zero))
+ return;
+
+ // keep track of the max of the baseline / highlight ratio
+ if(flags & RESULT_IS_BASE_HIGH_RATIO && v > stats->max_base_high_ratio)
+ stats->max_base_high_ratio = v;
+
+ struct register_result t = {
+ .flags = flags,
+ .st = st,
+ .chart_id = st->id,
+ .context = st->context,
+ .dim_name = d->name,
+ .value = v
+ };
+
+ char buf[5000 + 1];
+ snprintfz(buf, 5000, "%s:%s", st->id, d->name);
+ dictionary_set(results, buf, &t, sizeof(struct register_result));
+}
+
+// ----------------------------------------------------------------------------
+// Generation of JSON output for the results
+
+static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *wb,
+ long long after, long long before,
+ long long baseline_after, long long baseline_before,
+ long points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions __maybe_unused, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ buffer_sprintf(wb, "{\n"
+ "\t\"after\": %lld,\n"
+ "\t\"before\": %lld,\n"
+ "\t\"duration\": %lld,\n"
+ "\t\"points\": %ld,\n",
+ after,
+ before,
+ before - after,
+ points
+ );
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME)
+ buffer_sprintf(wb, ""
+ "\t\"baseline_after\": %lld,\n"
+ "\t\"baseline_before\": %lld,\n"
+ "\t\"baseline_duration\": %lld,\n"
+ "\t\"baseline_points\": %ld,\n",
+ baseline_after,
+ baseline_before,
+ baseline_before - baseline_after,
+ points << shifts
+ );
+
+ buffer_sprintf(wb, ""
+ "\t\"statistics\": {\n"
+ "\t\t\"query_time_ms\": %f,\n"
+ "\t\t\"db_queries\": %zu,\n"
+ "\t\t\"query_result_points\": %zu,\n"
+ "\t\t\"binary_searches\": %zu,\n"
+ "\t\t\"db_points_read\": %zu,\n"
+ "\t\t\"db_points_per_tier\": [ ",
+ (double)duration / (double)USEC_PER_MS,
+ stats->db_queries,
+ stats->result_points,
+ stats->binary_searches,
+ stats->db_points
+ );
+
+ for(int tier = 0; tier < storage_tiers ;tier++)
+ buffer_sprintf(wb, "%s%zu", tier?", ":"", stats->db_points_per_tier[tier]);
+
+ buffer_sprintf(wb, " ]\n"
+ "\t},\n"
+ "\t\"group\": \"%s\",\n"
+ "\t\"method\": \"%s\",\n"
+ "\t\"options\": \"",
+ web_client_api_request_v1_data_group_to_string(group),
+ weights_method_to_string(method)
+ );
+
+ web_client_api_request_v1_data_options_to_string(wb, options);
+}
+
+static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
+ long long after, long long before,
+ long long baseline_after, long long baseline_before,
+ long points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions, duration, stats);
+
+ buffer_strcat(wb, "\",\n\t\"correlated_charts\": {\n");
+
+ size_t charts = 0, chart_dims = 0, total_dimensions = 0;
+ struct register_result *t;
+ RRDSET *last_st = NULL; // never access this - we use it only for comparison
+ dfe_start_read(results, t) {
+ if(!last_st || t->st != last_st) {
+ last_st = t->st;
+
+ if(charts) buffer_strcat(wb, "\n\t\t\t}\n\t\t},\n");
+ buffer_strcat(wb, "\t\t\"");
+ buffer_strcat(wb, t->chart_id);
+ buffer_strcat(wb, "\": {\n");
+ buffer_strcat(wb, "\t\t\t\"context\": \"");
+ buffer_strcat(wb, t->context);
+ buffer_strcat(wb, "\",\n\t\t\t\"dimensions\": {\n");
+ charts++;
+ chart_dims = 0;
+ }
+ if (chart_dims) buffer_sprintf(wb, ",\n");
+ buffer_sprintf(wb, "\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, t->dim_name, t->value);
+ chart_dims++;
+ total_dimensions++;
+ }
+ dfe_done(t);
+
+ // close dimensions and chart
+ if (total_dimensions)
+ buffer_strcat(wb, "\n\t\t\t}\n\t\t}\n");
+
+ // close correlated_charts
+ buffer_sprintf(wb, "\t},\n"
+ "\t\"correlated_dimensions\": %zu,\n"
+ "\t\"total_dimensions_count\": %zu\n"
+ "}\n",
+ total_dimensions,
+ examined_dimensions
+ );
+
+ return total_dimensions;
+}
+
+static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *wb,
+ long long after, long long before,
+ long long baseline_after, long long baseline_before,
+ long points, WEIGHTS_METHOD method,
+ RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, usec_t duration,
+ WEIGHTS_STATS *stats) {
+
+ results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions, duration, stats);
+
+ DICTIONARY *context_results = dictionary_create(
+ DICTIONARY_FLAG_SINGLE_THREADED
+ |DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE
+ |DICTIONARY_FLAG_NAME_LINK_DONT_CLONE
+ |DICTIONARY_FLAG_DONT_OVERWRITE_VALUE
+ );
+
+ struct register_result *t;
+ dfe_start_read(results, t) {
+ struct register_result *tc = dictionary_set(context_results, t->context, t, sizeof(*t));
+ if(tc == t)
+ t->next = NULL;
+ else {
+ t->next = tc->next;
+ tc->next = t;
+ }
+ }
+ dfe_done(t);
+
+ buffer_strcat(wb, "\",\n\t\"contexts\": {\n");
+
+ size_t contexts = 0, total_dimensions = 0, charts = 0, context_dims = 0, chart_dims = 0;
+ NETDATA_DOUBLE contexts_total_weight = 0.0, charts_total_weight = 0.0;
+ RRDSET *last_st = NULL; // never access this - we use it only for comparison
+ dfe_start_read(context_results, t) {
+
+ if(contexts)
+ buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t},\n", charts_total_weight / chart_dims, contexts_total_weight / context_dims);
+
+ contexts++;
+ context_dims = 0;
+ contexts_total_weight = 0.0;
+
+ buffer_strcat(wb, "\t\t\"");
+ buffer_strcat(wb, t->context);
+ buffer_strcat(wb, "\": {\n\t\t\t\"charts\":{\n");
+
+ charts = 0;
+ chart_dims = 0;
+ struct register_result *tt;
+ for(tt = t; tt ; tt = tt->next) {
+ if(!last_st || tt->st != last_st) {
+ last_st = tt->st;
+
+ if(charts)
+ buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t},\n", charts_total_weight / chart_dims);
+
+ buffer_strcat(wb, "\t\t\t\t\"");
+ buffer_strcat(wb, tt->chart_id);
+ buffer_strcat(wb, "\": {\n");
+ buffer_strcat(wb, "\t\t\t\t\t\"dimensions\": {\n");
+ charts++;
+ chart_dims = 0;
+ charts_total_weight = 0.0;
+ }
+
+ if (chart_dims) buffer_sprintf(wb, ",\n");
+ buffer_sprintf(wb, "\t\t\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, tt->dim_name, tt->value);
+ charts_total_weight += tt->value;
+ contexts_total_weight += tt->value;
+ chart_dims++;
+ context_dims++;
+ total_dimensions++;
+ }
+ }
+ dfe_done(t);
+
+ dictionary_destroy(context_results);
+
+ // close dimensions and chart
+ if (total_dimensions)
+ buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t}\n", charts_total_weight / chart_dims, contexts_total_weight / context_dims);
+
+ // close contexts
+ buffer_sprintf(wb, "\t},\n"
+ "\t\"weighted_dimensions\": %zu,\n"
+ "\t\"total_dimensions_count\": %zu\n"
+ "}\n",
+ total_dimensions,
+ examined_dimensions
+ );
+
+ return total_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+// KS2 algorithm functions
+
+typedef long int DIFFS_NUMBERS;
+#define DOUBLE_TO_INT_MULTIPLIER 100000
+
+static inline int binary_search_bigger_than(const DIFFS_NUMBERS arr[], int left, int size, DIFFS_NUMBERS K) {
+ // binary search to find the smallest index of the
+ // first value in the array that is greater than K
+
+ int right = size;
+ while(left < right) {
+ int middle = (int)(((unsigned int)(left + right)) >> 1);
+
+ if(arr[middle] > K)
+ right = middle;
+
+ else
+ left = middle + 1;
+ }
+
+ return left;
+}
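+// e.g. for a sorted arr[] = { 1, 3, 3, 7 }:
+//   binary_search_bigger_than(arr, 0, 4, 3) == 3 (arr[3] == 7 is the first value > 3)
+//   binary_search_bigger_than(arr, 0, 4, 9) == 4 (the size, i.e. no value is > 9)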
+
+int compare_diffs(const void *left, const void *right) {
+ DIFFS_NUMBERS lt = *(DIFFS_NUMBERS *)left;
+ DIFFS_NUMBERS rt = *(DIFFS_NUMBERS *)right;
+
+ // https://stackoverflow.com/a/3886497/1114110
+ return (lt > rt) - (lt < rt);
+}
+
+static size_t calculate_pairs_diff(DIFFS_NUMBERS *diffs, NETDATA_DOUBLE *arr, size_t size) {
+ NETDATA_DOUBLE *last = &arr[size - 1];
+ size_t added = 0;
+
+ while(last > arr) {
+ NETDATA_DOUBLE second = *last--;
+ NETDATA_DOUBLE first = *last;
+ *diffs++ = (DIFFS_NUMBERS)((first - second) * (NETDATA_DOUBLE)DOUBLE_TO_INT_MULTIPLIER);
+ added++;
+ }
+
+ return added;
+}
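+// e.g. for arr[] = { 10.0, 7.0, 12.0 } it stores diffs[] = { -500000, 300000 },
+// i.e. the consecutive differences arr[1] - arr[2] and arr[0] - arr[1], scaled
+// by DOUBLE_TO_INT_MULTIPLIER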
+
+static double ks_2samp(DIFFS_NUMBERS baseline_diffs[], int base_size, DIFFS_NUMBERS highlight_diffs[], int high_size, uint32_t base_shifts) {
+
+ qsort(baseline_diffs, base_size, sizeof(DIFFS_NUMBERS), compare_diffs);
+ qsort(highlight_diffs, high_size, sizeof(DIFFS_NUMBERS), compare_diffs);
+
+ // Now we should be calculating this:
+ //
+ // For each number in the diffs arrays, we should find the index of the
+ // first number bigger than it in both arrays and calculate the % of this
+ // index vs the total array size. Once we have the 2 percentages, we should
+ // find the min and max across the deltas of all of them.
+ //
+ // It should look like this:
+ //
+ // base_pcent = binary_search_bigger_than(...) / base_size;
+ // high_pcent = binary_search_bigger_than(...) / high_size;
+ // delta = base_pcent - high_pcent;
+ // if(delta < min) min = delta;
+ // if(delta > max) max = delta;
+ //
+ // This would require a lot of multiplications and divisions.
+ //
+ // To speed it up, we do the binary search to find the index of each number,
+ // but instead of dividing, we shift the highlight index left by the number
+ // of power-of-two shifts the baseline is bigger, so the 2 indexes become
+ // directly comparable.
+ // We also keep track of the original indexes with min and max, to properly
+ // calculate their percentages once the loops finish.
+
+
+ // initialize min and max using the first number of baseline_diffs
+ DIFFS_NUMBERS K = baseline_diffs[0];
+ int base_idx = binary_search_bigger_than(baseline_diffs, 1, base_size, K);
+ int high_idx = binary_search_bigger_than(highlight_diffs, 0, high_size, K);
+ int delta = base_idx - (high_idx << base_shifts);
+ int min = delta, max = delta;
+ int base_min_idx = base_idx;
+ int base_max_idx = base_idx;
+ int high_min_idx = high_idx;
+ int high_max_idx = high_idx;
+
+ // do the baseline_diffs starting from 1 (we did position 0 above)
+ for(int i = 1; i < base_size; i++) {
+ K = baseline_diffs[i];
+ base_idx = binary_search_bigger_than(baseline_diffs, i + 1, base_size, K); // starting from i + 1, since baseline_diffs is sorted
+ high_idx = binary_search_bigger_than(highlight_diffs, 0, high_size, K);
+
+ delta = base_idx - (high_idx << base_shifts);
+ if(delta < min) {
+ min = delta;
+ base_min_idx = base_idx;
+ high_min_idx = high_idx;
+ }
+ else if(delta > max) {
+ max = delta;
+ base_max_idx = base_idx;
+ high_max_idx = high_idx;
+ }
+ }
+
+ // do the highlight_diffs starting from 0
+ for(int i = 0; i < high_size; i++) {
+ K = highlight_diffs[i];
+ base_idx = binary_search_bigger_than(baseline_diffs, 0, base_size, K);
+ high_idx = binary_search_bigger_than(highlight_diffs, i + 1, high_size, K); // starting from i + 1, since highlight_diffs is sorted
+
+ delta = base_idx - (high_idx << base_shifts);
+ if(delta < min) {
+ min = delta;
+ base_min_idx = base_idx;
+ high_min_idx = high_idx;
+ }
+ else if(delta > max) {
+ max = delta;
+ base_max_idx = base_idx;
+ high_max_idx = high_idx;
+ }
+ }
+
+ // now we have the min, max and their indexes
+ // properly calculate min and max as dmin and dmax
+ double dbase_size = (double)base_size;
+ double dhigh_size = (double)high_size;
+ double dmin = ((double)base_min_idx / dbase_size) - ((double)high_min_idx / dhigh_size);
+ double dmax = ((double)base_max_idx / dbase_size) - ((double)high_max_idx / dhigh_size);
+
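+ // as in the scipy reference implementation (see the comment block at the
+ // bottom of this file): clip -min(delta) into [0.0, 1.0] and use the bigger
+ // of the two deviations as the KS statistic d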
+ dmin = -dmin;
+ if(islessequal(dmin, 0.0)) dmin = 0.0;
+ else if(isgreaterequal(dmin, 1.0)) dmin = 1.0;
+
+ double d;
+ if(isgreaterequal(dmin, dmax)) d = dmin;
+ else d = dmax;
+
+ double en = round(dbase_size * dhigh_size / (dbase_size + dhigh_size));
+
+ // under these conditions, KSfbar() crashes
+ if(unlikely(isnan(en) || isinf(en) || en == 0.0 || isnan(d) || isinf(d)))
+ return NAN;
+
+ return KSfbar((int)en, d);
+}
+
+static double kstwo(
+ NETDATA_DOUBLE baseline[], int baseline_points,
+ NETDATA_DOUBLE highlight[], int highlight_points, uint32_t base_shifts) {
+ // -1 in size, since calculate_pairs_diff() returns one less point
+ DIFFS_NUMBERS baseline_diffs[baseline_points - 1];
+ DIFFS_NUMBERS highlight_diffs[highlight_points - 1];
+
+ int base_size = (int)calculate_pairs_diff(baseline_diffs, baseline, baseline_points);
+ int high_size = (int)calculate_pairs_diff(highlight_diffs, highlight, highlight_points);
+
+ if(unlikely(!base_size || !high_size))
+ return NAN;
+
+ if(unlikely(base_size != baseline_points - 1 || high_size != highlight_points - 1)) {
+ error("Metric correlations: internal error - calculate_pairs_diff() returns the wrong number of entries");
+ return NAN;
+ }
+
+ return ks_2samp(baseline_diffs, base_size, highlight_diffs, high_size, base_shifts);
+}
+
+
+static int rrdset_metric_correlations_ks2(RRDSET *st, DICTIONARY *results,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ long long points, RRDR_OPTIONS options,
+ RRDR_GROUPING group, const char *group_options, int tier,
+ uint32_t shifts, int timeout,
+ WEIGHTS_STATS *stats, bool register_zero) {
+ options |= RRDR_OPTION_NATURAL_POINTS;
+
+ long group_time = 0;
+ struct context_param *context_param_list = NULL;
+
+ int examined_dimensions = 0;
+
+ RRDR *high_rrdr = NULL;
+ RRDR *base_rrdr = NULL;
+
+ // get first the highlight to find the number of points available
+ stats->db_queries++;
+ usec_t started_usec = now_realtime_usec();
+ ONEWAYALLOC *owa = onewayalloc_create(0);
+ high_rrdr = rrd2rrdr(owa, st, points,
+ after, before, group,
+ group_time, options, NULL, context_param_list, group_options,
+ timeout, tier);
+ if(!high_rrdr) {
+ info("Metric correlations: rrd2rrdr() failed for the highlighted window on chart '%s'.", st->name);
+ goto cleanup;
+ }
+
+ for(int i = 0; i < storage_tiers ;i++)
+ stats->db_points_per_tier[i] += high_rrdr->internal.tier_points_read[i];
+
+ stats->db_points += high_rrdr->internal.db_points_read;
+ stats->result_points += high_rrdr->internal.result_points_generated;
+ if(!high_rrdr->d) {
+ info("Metric correlations: rrd2rrdr() did not return any dimensions on chart '%s'.", st->name);
+ goto cleanup;
+ }
+ if(high_rrdr->result_options & RRDR_RESULT_OPTION_CANCEL) {
+ info("Metric correlations: rrd2rrdr() on highlighted window timed out '%s'.", st->name);
+ goto cleanup;
+ }
+ int high_points = rrdr_rows(high_rrdr);
+
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ goto cleanup;
+
+ // get the baseline, requesting the same number of points as the highlight
+ stats->db_queries++;
+ base_rrdr = rrd2rrdr(owa, st,high_points << shifts,
+ baseline_after, baseline_before, group,
+ group_time, options, NULL, context_param_list, group_options,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)), tier);
+ if(!base_rrdr) {
+ info("Metric correlations: rrd2rrdr() failed for the baseline window on chart '%s'.", st->name);
+ goto cleanup;
+ }
+
+ for(int i = 0; i < storage_tiers ;i++)
+ stats->db_points_per_tier[i] += base_rrdr->internal.tier_points_read[i];
+
+ stats->db_points += base_rrdr->internal.db_points_read;
+ stats->result_points += base_rrdr->internal.result_points_generated;
+ if(!base_rrdr->d) {
+ info("Metric correlations: rrd2rrdr() did not return any dimensions on chart '%s'.", st->name);
+ goto cleanup;
+ }
+ if (base_rrdr->d != high_rrdr->d) {
+ info("Cannot generate metric correlations for chart '%s' when the baseline and the highlight have different number of dimensions.", st->name);
+ goto cleanup;
+ }
+ if(base_rrdr->result_options & RRDR_RESULT_OPTION_CANCEL) {
+ info("Metric correlations: rrd2rrdr() on baseline window timed out '%s'.", st->name);
+ goto cleanup;
+ }
+ int base_points = rrdr_rows(base_rrdr);
+
+ now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ goto cleanup;
+
+ // we need at least 2 points to do the job
+ if(base_points < 2 || high_points < 2)
+ goto cleanup;
+
+ // for each dimension
+ RRDDIM *d;
+ int i;
+ for(i = 0, d = base_rrdr->st->dimensions ; d && i < base_rrdr->d; i++, d = d->next) {
+
+ // skip the not evaluated ones
+ if(unlikely(base_rrdr->od[i] & RRDR_DIMENSION_HIDDEN) || (high_rrdr->od[i] & RRDR_DIMENSION_HIDDEN))
+ continue;
+
+ examined_dimensions++;
+
+ // skip the dimensions that are just zero for both the baseline and the highlight
+ if(unlikely(!(base_rrdr->od[i] & RRDR_DIMENSION_NONZERO) && !(high_rrdr->od[i] & RRDR_DIMENSION_NONZERO)))
+ continue;
+
+ // copy the baseline points of the dimension to a contiguous array
+ // there is no need to check for empty values, since empty are already zero
+ NETDATA_DOUBLE baseline[base_points];
+ for(int c = 0; c < base_points; c++)
+ baseline[c] = base_rrdr->v[ c * base_rrdr->d + i ];
+
+ // copy the highlight points of the dimension to a contiguous array
+ // there is no need to check for empty values, since empty values are already zero
+ // https://github.com/netdata/netdata/blob/6e3144683a73a2024d51425b20ecfd569034c858/web/api/queries/average/average.c#L41-L43
+ NETDATA_DOUBLE highlight[high_points];
+ for(int c = 0; c < high_points; c++)
+ highlight[c] = high_rrdr->v[ c * high_rrdr->d + i ];
+
+ stats->binary_searches += 2 * (base_points - 1) + 2 * (high_points - 1);
+
+ double prob = kstwo(baseline, base_points, highlight, high_points, shifts);
+ if(!isnan(prob) && !isinf(prob)) {
+
+ // these conditions should never happen, but still let's check
+ if(unlikely(prob < 0.0)) {
+ error("Metric correlations: kstwo() returned a negative number: %f", prob);
+ prob = -prob;
+ }
+ if(unlikely(prob > 1.0)) {
+ error("Metric correlations: kstwo() returned a number above 1.0: %f", prob);
+ prob = 1.0;
+ }
+
+ // to spread the results evenly, 0.0 needs to be the less correlated and 1.0 the most correlated
+ // so we flip the result of kstwo()
+ register_result(results, base_rrdr->st, d, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, stats, register_zero);
+ }
+ }
+
+cleanup:
+ rrdr_free(owa, high_rrdr);
+ rrdr_free(owa, base_rrdr);
+ onewayalloc_destroy(owa);
+ return examined_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+// VOLUME algorithm functions
+
+static int rrdset_metric_correlations_volume(RRDSET *st, DICTIONARY *results,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ RRDR_OPTIONS options, RRDR_GROUPING group, const char *group_options,
+ int tier, int timeout,
+ WEIGHTS_STATS *stats, bool register_zero) {
+
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ABSOLUTE | RRDR_OPTION_NATURAL_POINTS;
+ long group_time = 0;
+
+ int examined_dimensions = 0;
+ int ret, value_is_null;
+ usec_t started_usec = now_realtime_usec();
+
+ RRDDIM *d;
+ for(d = st->dimensions; d ; d = d->next) {
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ return examined_dimensions;
+
+ // we count how many metrics we evaluated
+ examined_dimensions++;
+
+ // there is no point in passing a timeout to these queries,
+ // since the query engine checks for a timeout between
+ // dimensions, and we query a single dimension at a time.
+
+ stats->db_queries++;
+ NETDATA_DOUBLE baseline_average = NAN;
+ NETDATA_DOUBLE base_anomaly_rate = 0;
+ value_is_null = 1;
+ ret = rrdset2value_api_v1(st, NULL, &baseline_average, d->id, 1,
+ baseline_after, baseline_before,
+ group, group_options, group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, &base_anomaly_rate, 0, tier);
+
+ if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(baseline_average)) {
+ // this means no data for the baseline window, but we may have data for the highlighted one - assume zero
+ baseline_average = 0.0;
+ }
+
+ stats->db_queries++;
+ NETDATA_DOUBLE highlight_average = NAN;
+ NETDATA_DOUBLE high_anomaly_rate = 0;
+ value_is_null = 1;
+ ret = rrdset2value_api_v1(st, NULL, &highlight_average, d->id, 1,
+ after, before,
+ group, group_options, group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, &high_anomaly_rate, 0, tier);
+
+ if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(highlight_average)) {
+ // this means no data for the highlighted duration - so skip it
+ continue;
+ }
+
+ if(baseline_average == highlight_average) {
+ // they are the same - let's move on
+ continue;
+ }
+
+ stats->db_queries++;
+ NETDATA_DOUBLE highlight_countif = NAN;
+ value_is_null = 1;
+
+ char highlighted_countif_options[50 + 1];
+ snprintfz(highlighted_countif_options, 50, "%s" NETDATA_DOUBLE_FORMAT, highlight_average < baseline_average ? "<":">", baseline_average);
+
+ ret = rrdset2value_api_v1(st, NULL, &highlight_countif, d->id, 1,
+ after, before,
+ RRDR_GROUPING_COUNTIF,highlighted_countif_options,
+ group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, NULL, 0, tier);
+
+ if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(highlight_countif)) {
+ info("MC: highlighted countif query failed, but highlighted average worked - strange...");
+ continue;
+ }
+
+ // this represents the percentage of time
+ // the highlighted window was above/below the baseline window
+ // (above or below depending on their averages)
+ highlight_countif = highlight_countif / 100.0; // countif returns 0 - 100.0
+
+ RESULT_FLAGS flags;
+ NETDATA_DOUBLE pcent = NAN;
+ if(isgreater(baseline_average, 0.0) || isless(baseline_average, 0.0)) {
+ flags = RESULT_IS_BASE_HIGH_RATIO;
+ pcent = (highlight_average - baseline_average) / baseline_average * highlight_countif;
+ }
+ else {
+ flags = RESULT_IS_PERCENTAGE_OF_TIME;
+ pcent = highlight_countif;
+ }
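+
+ // e.g. with a baseline average of 100, a highlight average of 150, and the
+ // highlight spending 80% of its time above the baseline average:
+ // pcent = (150 - 100) / 100 * 0.8 = 0.4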
+
+ register_result(results, st, d, pcent, flags, stats, register_zero);
+ }
+
+ return examined_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+// ANOMALY RATE algorithm functions
+
+static int rrdset_weights_anomaly_rate(RRDSET *st, DICTIONARY *results,
+ long long after, long long before,
+ RRDR_OPTIONS options, RRDR_GROUPING group, const char *group_options,
+ int tier, int timeout,
+ WEIGHTS_STATS *stats, bool register_zero) {
+
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ANOMALY_BIT | RRDR_OPTION_NATURAL_POINTS;
+ long group_time = 0;
+
+ int examined_dimensions = 0;
+ int ret, value_is_null;
+ usec_t started_usec = now_realtime_usec();
+
+ RRDDIM *d;
+ for(d = st->dimensions; d ; d = d->next) {
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout * USEC_PER_MS)
+ return examined_dimensions;
+
+ // we count how many metrics we evaluated
+ examined_dimensions++;
+
+ // there is no point in passing a timeout to these queries,
+ // since the query engine checks for a timeout between
+ // dimensions, and we query a single dimension at a time.
+
+ stats->db_queries++;
+ NETDATA_DOUBLE average = NAN;
+ NETDATA_DOUBLE anomaly_rate = 0;
+ value_is_null = 1;
+ ret = rrdset2value_api_v1(st, NULL, &average, d->id, 1,
+ after, before,
+ group, group_options, group_time, options,
+ NULL, NULL,
+ &stats->db_points, stats->db_points_per_tier,
+ &stats->result_points,
+ &value_is_null, &anomaly_rate, 0, tier);
+
+ if(ret == HTTP_RESP_OK && !value_is_null && netdata_double_isnumber(average))
+ register_result(results, st, d, average, 0, stats, register_zero);
+ }
+
+ return examined_dimensions;
+}
+
+// ----------------------------------------------------------------------------
+
+int compare_netdata_doubles(const void *left, const void *right) {
+ NETDATA_DOUBLE lt = *(NETDATA_DOUBLE *)left;
+ NETDATA_DOUBLE rt = *(NETDATA_DOUBLE *)right;
+
+ // https://stackoverflow.com/a/3886497/1114110
+ return (lt > rt) - (lt < rt);
+}
+
+static inline int binary_search_bigger_than_netdata_double(const NETDATA_DOUBLE arr[], int left, int size, NETDATA_DOUBLE K) {
+ // binary search to find the smallest index of the
+ // first value in the array that is greater than K
+
+ int right = size;
+ while(left < right) {
+ int middle = (int)(((unsigned int)(left + right)) >> 1);
+
+ if(arr[middle] > K)
+ right = middle;
+
+ else
+ left = middle + 1;
+ }
+
+ return left;
+}
+
+// ----------------------------------------------------------------------------
+// spread the results evenly according to their value
+
+static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
+ struct register_result *t;
+
+ // count the dimensions
+ size_t dimensions = dictionary_stats_entries(results);
+ if(!dimensions) return 0;
+
+ if(stats->max_base_high_ratio == 0.0)
+ stats->max_base_high_ratio = 1.0;
+
+ // create an array of the right size and copy all the values in it
+ NETDATA_DOUBLE slots[dimensions];
+ dimensions = 0;
+ dfe_start_read(results, t) {
+ if(t->flags & (RESULT_IS_PERCENTAGE_OF_TIME))
+ t->value = t->value * stats->max_base_high_ratio;
+
+ slots[dimensions++] = t->value;
+ }
+ dfe_done(t);
+
+ // sort the array with the values of all dimensions
+ qsort(slots, dimensions, sizeof(NETDATA_DOUBLE), compare_netdata_doubles);
+
+ // skip the duplicates in the sorted array
+ NETDATA_DOUBLE last_value = NAN;
+ size_t unique_values = 0;
+ for(size_t i = 0; i < dimensions ;i++) {
+ if(likely(slots[i] != last_value))
+ slots[unique_values++] = last_value = slots[i];
+ }
+
+ // this cannot happen, but coverity thinks otherwise...
+ if(!unique_values)
+ unique_values = dimensions;
+
+ // calculate the weight of each slot, using the number of unique values
+ NETDATA_DOUBLE slot_weight = 1.0 / (NETDATA_DOUBLE)unique_values;
+
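+ // rank-transform each value: a dimension's weight becomes 1.0 - rank * slot_weight
+ // (rank = 1 for the smallest unique value), so the dimension(s) with the highest
+ // value map to 0.0 and those with the lowest value map close to 1.0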
+ dfe_start_read(results, t) {
+ int slot = binary_search_bigger_than_netdata_double(slots, 0, (int)unique_values, t->value);
+ NETDATA_DOUBLE v = slot * slot_weight;
+ if(unlikely(v > 1.0)) v = 1.0;
+ v = 1.0 - v;
+ t->value = v;
+ }
+ dfe_done(t);
+
+ return dimensions;
+}
+
+// ----------------------------------------------------------------------------
+// The main function
+
+int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+ RRDR_GROUPING group, const char *group_options,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ long long points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, int tier, int timeout) {
+ WEIGHTS_STATS stats = {};
+
+ DICTIONARY *results = register_result_init();
+ DICTIONARY *charts = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE);
+ char *error = NULL;
+ int resp = HTTP_RESP_OK;
+
+ // if the user didn't give a timeout
+ // assume 60 seconds
+ if(!timeout)
+ timeout = 60 * MSEC_PER_SEC;
+
+ // if the timeout is less than 1 second
+ // make it at least 1 second
+ if(timeout < (long)(1 * MSEC_PER_SEC))
+ timeout = 1 * MSEC_PER_SEC;
+
+ usec_t timeout_usec = timeout * USEC_PER_MS;
+ usec_t started_usec = now_realtime_usec();
+
+ if(!rrdr_relative_window_to_absolute(&after, &before))
+ buffer_no_cacheable(wb);
+
+ if (before <= after) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Invalid selected time-range.";
+ goto cleanup;
+ }
+
+ uint32_t shifts = 0;
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ if(!points) points = 500;
+
+ if(baseline_before <= API_RELATIVE_TIME_MAX)
+ baseline_before += after;
+
+ rrdr_relative_window_to_absolute(&baseline_after, &baseline_before);
+
+ if (baseline_before <= baseline_after) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Invalid baseline time-range.";
+ goto cleanup;
+ }
+
+ // baseline should be a power of two multiple of highlight
+ long long base_delta = baseline_before - baseline_after;
+ long long high_delta = before - after;
+ uint32_t multiplier = (uint32_t)round((double)base_delta / (double)high_delta);
+
+ // check if the multiplier is a power of two
+ // https://stackoverflow.com/a/600306/1114110
+ if((multiplier & (multiplier - 1)) != 0) {
+ // it is not power of two
+ // let's find the closest power of two
+ // https://stackoverflow.com/a/466242/1114110
+ multiplier--;
+ multiplier |= multiplier >> 1;
+ multiplier |= multiplier >> 2;
+ multiplier |= multiplier >> 4;
+ multiplier |= multiplier >> 8;
+ multiplier |= multiplier >> 16;
+ multiplier++;
+ }
+
+ // convert the multiplier to the number of shifts
+ // we need to do, to divide baseline numbers to match
+ // the highlight ones
+ while(multiplier > 1) {
+ shifts++;
+ multiplier = multiplier >> 1;
+ }
+
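+ // e.g. a baseline window 6 times the size of the highlight window rounds up
+ // to the closest power of two (8), giving shifts = 3, so the baseline query
+ // will fetch 8 times the highlight points
+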
+ // if the baseline size will not comply to MAX_POINTS
+ // lower the window of the baseline
+ while(shifts && (points << shifts) > MAX_POINTS)
+ shifts--;
+
+ // if the baseline size still does not comply to MAX_POINTS
+ // lower the resolution of the highlight and the baseline
+ while((points << shifts) > MAX_POINTS)
+ points = points >> 1;
+
+ if(points < 15) {
+ resp = HTTP_RESP_BAD_REQUEST;
+ error = "Too few points available, at least 15 are needed.";
+ goto cleanup;
+ }
+
+ // adjust the baseline to be multiplier times bigger than the highlight
+ baseline_after = baseline_before - (high_delta << shifts);
+ }
+
+ // don't lock here and wait for results:
+ // collect the charts first, then run metric correlations on them
+ RRDSET *st;
+ rrdhost_rdlock(host);
+ rrdset_foreach_read(st, host) {
+ if (rrdset_is_available_for_viewers(st)) {
+ if(!contexts || simple_pattern_matches(contexts, st->context))
+ dictionary_set(charts, st->name, NULL, 0);
+ }
+ }
+ rrdhost_unlock(host);
+
+ size_t examined_dimensions = 0;
+ void *ptr;
+
+ bool register_zero = true;
+ if(options & RRDR_OPTION_NONZERO) {
+ register_zero = false;
+ options &= ~RRDR_OPTION_NONZERO;
+ }
+
+ // for every chart in the dictionary
+ dfe_start_read(charts, ptr) {
+ usec_t now_usec = now_realtime_usec();
+ if(now_usec - started_usec > timeout_usec) {
+ error = "timed out";
+ resp = HTTP_RESP_GATEWAY_TIMEOUT;
+ goto cleanup;
+ }
+
+ st = rrdset_find_byname(host, ptr_name);
+ if(!st) continue;
+
+ rrdset_rdlock(st);
+
+ switch(method) {
+ case WEIGHTS_METHOD_ANOMALY_RATE:
+ options |= RRDR_OPTION_ANOMALY_BIT;
+ points = 1;
+ examined_dimensions += rrdset_weights_anomaly_rate(st, results,
+ after, before,
+ options, group, group_options, tier,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
+ &stats, register_zero);
+ break;
+
+ case WEIGHTS_METHOD_MC_VOLUME:
+ points = 1;
+ examined_dimensions += rrdset_metric_correlations_volume(st, results,
+ baseline_after, baseline_before,
+ after, before,
+ options, group, group_options, tier,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
+ &stats, register_zero);
+ break;
+
+ default:
+ case WEIGHTS_METHOD_MC_KS2:
+ examined_dimensions += rrdset_metric_correlations_ks2(st, results,
+ baseline_after, baseline_before,
+ after, before,
+ points, options, group, group_options, tier, shifts,
+ (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
+ &stats, register_zero);
+ break;
+ }
+
+ rrdset_unlock(st);
+ }
+ dfe_done(ptr);
+
+ if(!register_zero)
+ options |= RRDR_OPTION_NONZERO;
+
+ if(!(options & RRDR_OPTION_RETURN_RAW))
+ spread_results_evenly(results, &stats);
+
+ usec_t ended_usec = now_realtime_usec();
+
+ // generate the json output we need
+ buffer_flush(wb);
+
+ size_t added_dimensions = 0;
+ switch(format) {
+ case WEIGHTS_FORMAT_CHARTS:
+ added_dimensions = registered_results_to_json_charts(results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
+ break;
+
+ default:
+ case WEIGHTS_FORMAT_CONTEXTS:
+ added_dimensions = registered_results_to_json_contexts(results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
+ break;
+ }
+
+ if(!added_dimensions) {
+ error = "no results produced.";
+ resp = HTTP_RESP_NOT_FOUND;
+ }
+
+cleanup:
+ if(charts) dictionary_destroy(charts);
+ if(results) register_result_destroy(results);
+
+ if(error) {
+ buffer_flush(wb);
+ buffer_sprintf(wb, "{\"error\": \"%s\" }", error);
+ }
+
+ return resp;
+}
+
+// ----------------------------------------------------------------------------
+// unittest
+
+/*
+
+Unit tests against the output of this:
+
+https://github.com/scipy/scipy/blob/4cf21e753cf937d1c6c2d2a0e372fbc1dbbeea81/scipy/stats/_stats_py.py#L7275-L7449
+
+import matplotlib.pyplot as plt
+import pandas as pd
+import numpy as np
+import scipy as sp
+from scipy import stats
+
+data1 = np.array([ 1111, -2222, 33, 100, 100, 15555, -1, 19999, 888, 755, -1, -730 ])
+data2 = np.array([365, -123, 0])
+data1 = np.sort(data1)
+data2 = np.sort(data2)
+n1 = data1.shape[0]
+n2 = data2.shape[0]
+data_all = np.concatenate([data1, data2])
+cdf1 = np.searchsorted(data1, data_all, side='right') / n1
+cdf2 = np.searchsorted(data2, data_all, side='right') / n2
+print(data_all)
+print("\ndata1", data1, cdf1)
+print("\ndata2", data2, cdf2)
+cddiffs = cdf1 - cdf2
+print("\ncddiffs", cddiffs)
+minS = np.clip(-np.min(cddiffs), 0, 1)
+maxS = np.max(cddiffs)
+print("\nmin", minS)
+print("max", maxS)
+m, n = sorted([float(n1), float(n2)], reverse=True)
+en = m * n / (m + n)
+d = max(minS, maxS)
+prob = stats.distributions.kstwo.sf(d, np.round(en))
+print("\nprob", prob)
+
+*/
+
+static int double_expect(double v, const char *str, const char *descr) {
+ char buf[100 + 1];
+ snprintfz(buf, 100, "%0.6f", v);
+ int ret = strcmp(buf, str) ? 1 : 0;
+
+ fprintf(stderr, "%s %s, expected %s, got %s\n", ret?"FAILED":"OK", descr, str, buf);
+ return ret;
+}
+
+static int mc_unittest1(void) {
+ int bs = 3, hs = 3;
+ DIFFS_NUMBERS base[3] = { 1, 2, 3 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 0);
+ return double_expect(prob, "0.222222", "3x3");
+}
+
+static int mc_unittest2(void) {
+ int bs = 6, hs = 3;
+ DIFFS_NUMBERS base[6] = { 1, 2, 3, 10, 10, 15 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 1);
+ return double_expect(prob, "0.500000", "6x3");
+}
+
+static int mc_unittest3(void) {
+ int bs = 12, hs = 3;
+ DIFFS_NUMBERS base[12] = { 1, 2, 3, 10, 10, 15, 111, 19999, 8, 55, -1, -73 };
+ DIFFS_NUMBERS high[3] = { 3, 4, 6 };
+
+ double prob = ks_2samp(base, bs, high, hs, 2);
+ return double_expect(prob, "0.347222", "12x3");
+}
+
+static int mc_unittest4(void) {
+ int bs = 12, hs = 3;
+ DIFFS_NUMBERS base[12] = { 1111, -2222, 33, 100, 100, 15555, -1, 19999, 888, 755, -1, -730 };
+ DIFFS_NUMBERS high[3] = { 365, -123, 0 };
+
+ double prob = ks_2samp(base, bs, high, hs, 2);
+ return double_expect(prob, "0.777778", "12x3");
+}
+
+int mc_unittest(void) {
+ int errors = 0;
+
+ errors += mc_unittest1();
+ errors += mc_unittest2();
+ errors += mc_unittest3();
+ errors += mc_unittest4();
+
+ return errors;
+}
+
diff --git a/web/api/queries/weights.h b/web/api/queries/weights.h
new file mode 100644
index 000000000..f88a134f2
--- /dev/null
+++ b/web/api/queries/weights.h
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_API_WEIGHTS_H
+#define NETDATA_API_WEIGHTS_H 1
+
+#include "query.h"
+
+typedef enum {
+ WEIGHTS_METHOD_MC_KS2 = 1,
+ WEIGHTS_METHOD_MC_VOLUME = 2,
+ WEIGHTS_METHOD_ANOMALY_RATE = 3,
+} WEIGHTS_METHOD;
+
+typedef enum {
+ WEIGHTS_FORMAT_CHARTS = 1,
+ WEIGHTS_FORMAT_CONTEXTS = 2,
+} WEIGHTS_FORMAT;
+
+extern int enable_metric_correlations;
+extern int metric_correlations_version;
+extern WEIGHTS_METHOD default_metric_correlations_method;
+
+extern int web_api_v1_weights (RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+ RRDR_GROUPING group, const char *group_options,
+ long long baseline_after, long long baseline_before,
+ long long after, long long before,
+ long long points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, int tier, int timeout);
+
+extern WEIGHTS_METHOD weights_string_to_method(const char *method);
+extern const char *weights_method_to_string(WEIGHTS_METHOD method);
+extern int mc_unittest(void);
+
+#endif //NETDATA_API_WEIGHTS_H