author    Daniel Baumann <daniel.baumann@progress-linux.org> 2022-11-30 18:47:00 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2022-11-30 18:47:00 +0000
commit    03bf87dcb06f7021bfb2df2fa8691593c6148aff (patch)
tree      e16b06711a2ed77cafb4b7754be0220c3d14a9d7 /web/api
parent    Adding upstream version 1.36.1. (diff)
download  netdata-upstream/1.37.0.tar.xz
          netdata-upstream/1.37.0.zip
Adding upstream version 1.37.0. (upstream/1.37.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'web/api')
-rw-r--r--  web/api/badges/web_buffer_svg.c                    |   23
-rw-r--r--  web/api/badges/web_buffer_svg.h                    |    6
-rw-r--r--  web/api/exporters/allmetrics.h                     |    2
-rw-r--r--  web/api/exporters/shell/allmetrics_shell.c         |   59
-rw-r--r--  web/api/exporters/shell/allmetrics_shell.h         |    4
-rw-r--r--  web/api/formatters/charts2json.c                   |   37
-rw-r--r--  web/api/formatters/charts2json.h                   |    6
-rw-r--r--  web/api/formatters/csv/csv.c                       |   17
-rw-r--r--  web/api/formatters/csv/csv.h                       |    2
-rw-r--r--  web/api/formatters/json/json.c                     |   30
-rw-r--r--  web/api/formatters/json/json.h                     |    2
-rw-r--r--  web/api/formatters/json_wrapper.c                  |  192
-rw-r--r--  web/api/formatters/json_wrapper.h                  |    8
-rw-r--r--  web/api/formatters/rrd2json.c                      |  276
-rw-r--r--  web/api/formatters/rrd2json.h                      |   79
-rw-r--r--  web/api/formatters/rrdset2json.c                   |   52
-rw-r--r--  web/api/formatters/rrdset2json.h                   |    2
-rw-r--r--  web/api/formatters/ssv/ssv.c                       |    4
-rw-r--r--  web/api/formatters/ssv/ssv.h                       |    2
-rw-r--r--  web/api/formatters/value/value.c                   |   70
-rw-r--r--  web/api/formatters/value/value.h                   |   23
-rw-r--r--  web/api/health/health_cmdapi.h                     |    2
-rw-r--r--  web/api/netdata-swagger.json                       |   63
-rw-r--r--  web/api/netdata-swagger.yaml                       |   42
-rw-r--r--  web/api/queries/average/average.h                  |   10
-rw-r--r--  web/api/queries/countif/countif.h                  |   10
-rw-r--r--  web/api/queries/des/des.h                          |   12
-rw-r--r--  web/api/queries/incremental_sum/incremental_sum.h  |   10
-rw-r--r--  web/api/queries/max/max.h                          |   10
-rw-r--r--  web/api/queries/median/median.h                    |   26
-rw-r--r--  web/api/queries/min/min.h                          |   10
-rw-r--r--  web/api/queries/percentile/percentile.h            |   26
-rw-r--r--  web/api/queries/query.c                            | 1012
-rw-r--r--  web/api/queries/query.h                            |    8
-rw-r--r--  web/api/queries/rrdr.c                             |   60
-rw-r--r--  web/api/queries/rrdr.h                             |  111
-rw-r--r--  web/api/queries/ses/ses.h                          |   12
-rw-r--r--  web/api/queries/stddev/stddev.h                    |   16
-rw-r--r--  web/api/queries/sum/sum.h                          |   10
-rw-r--r--  web/api/queries/trimmed_mean/trimmed_mean.h        |   24
-rw-r--r--  web/api/queries/weights.c                          |  763
-rw-r--r--  web/api/queries/weights.h                          |   14
-rw-r--r--  web/api/tests/valid_urls.c                         |   13
-rw-r--r--  web/api/tests/web_api.c                            |   12
-rw-r--r--  web/api/web_api_v1.c                               |  443
-rw-r--r--  web/api/web_api_v1.h                               |   49
46 files changed, 1768 insertions(+), 1896 deletions(-)
diff --git a/web/api/badges/web_buffer_svg.c b/web/api/badges/web_buffer_svg.c
index 00b4ad650..080f2240f 100644
--- a/web/api/badges/web_buffer_svg.c
+++ b/web/api/badges/web_buffer_svg.c
@@ -893,6 +893,10 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
int group = RRDR_GROUPING_AVERAGE;
uint32_t options = 0x00000000;
+ const RRDCALC_ACQUIRED *rca = NULL;
+ RRDCALC *rc = NULL;
+ RRDSET *st = NULL;
+
while(url) {
char *value = mystrsep(&url, "&");
if(!value || !*value) continue;
@@ -957,7 +961,7 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
int scale = (scale_str && *scale_str)?str2i(scale_str):100;
- RRDSET *st = rrdset_find(host, chart);
+ st = rrdset_find(host, chart);
if(!st) st = rrdset_find_byname(host, chart);
if(!st) {
buffer_no_cacheable(w->response.data);
@@ -967,9 +971,10 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
}
st->last_accessed_time = now_realtime_sec();
- RRDCALC *rc = NULL;
if(alarm) {
- rc = rrdcalc_find(st, alarm);
+ rca = rrdcalc_from_rrdset_get(st, alarm);
+ rc = rrdcalc_acquired_to_rrdcalc(rca);
+
if (!rc) {
buffer_no_cacheable(w->response.data);
buffer_svg(w->response.data, "alarm not found", NAN, "", NULL, NULL, -1, scale, 0, -1, -1, NULL, NULL);
@@ -1020,19 +1025,19 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
label = dim;
}
else
- label = st->name;
+ label = rrdset_name(st);
}
if(!units) {
if(alarm) {
if(rc->units)
- units = rc->units;
+ units = rrdcalc_units(rc);
else
units = "";
}
else if(options & RRDR_OPTION_PERCENTAGE)
units = "%";
else
- units = st->units;
+ units = rrdset_units(st);
}
debug(D_WEB_CLIENT, "%llu: API command 'badge.svg' for chart '%s', alarm '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', options '0x%08x'"
@@ -1111,7 +1116,8 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
points, after, before, group, group_options, 0, options,
NULL, &latest_timestamp,
NULL, NULL, NULL,
- &value_is_null, NULL, 0, 0);
+ &value_is_null, NULL, 0, 0,
+ QUERY_SOURCE_API_BADGE);
// if the value cannot be calculated, show empty badge
if (ret != HTTP_RESP_OK) {
@@ -1143,7 +1149,8 @@ int web_client_api_request_v1_badge(RRDHOST *host, struct web_client *w, char *u
);
}
- cleanup:
+cleanup:
+ rrdcalc_from_rrdset_release(st, rca);
buffer_free(dimensions);
return ret;
}
diff --git a/web/api/badges/web_buffer_svg.h b/web/api/badges/web_buffer_svg.h
index 4853a8864..71857811f 100644
--- a/web/api/badges/web_buffer_svg.h
+++ b/web/api/badges/web_buffer_svg.h
@@ -6,12 +6,12 @@
#include "libnetdata/libnetdata.h"
#include "web/server/web_client.h"
-extern void buffer_svg(BUFFER *wb, const char *label,
+void buffer_svg(BUFFER *wb, const char *label,
NETDATA_DOUBLE value, const char *units, const char *label_color, const char *value_color, int precision, int scale, uint32_t options, int fixed_width_lbl, int fixed_width_val, const char* text_color_lbl, const char* text_color_val);
-extern char *format_value_and_unit(char *value_string, size_t value_string_len,
+char *format_value_and_unit(char *value_string, size_t value_string_len,
NETDATA_DOUBLE value, const char *units, int precision);
-extern int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url);
+int web_client_api_request_v1_badge(struct rrdhost *host, struct web_client *w, char *url);
#include "web/api/web_api_v1.h"
diff --git a/web/api/exporters/allmetrics.h b/web/api/exporters/allmetrics.h
index f076ff0d5..3afc42e28 100644
--- a/web/api/exporters/allmetrics.h
+++ b/web/api/exporters/allmetrics.h
@@ -7,6 +7,6 @@
#include "shell/allmetrics_shell.h"
#include "web/server/web_client.h"
-extern int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_allmetrics(RRDHOST *host, struct web_client *w, char *url);
#endif //NETDATA_API_ALLMETRICS_H
diff --git a/web/api/exporters/shell/allmetrics_shell.c b/web/api/exporters/shell/allmetrics_shell.c
index 615aab43c..0ffbac67b 100644
--- a/web/api/exporters/shell/allmetrics_shell.c
+++ b/web/api/exporters/shell/allmetrics_shell.c
@@ -25,73 +25,71 @@ static inline size_t shell_name_copy(char *d, const char *s, size_t usable) {
void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb) {
analytics_log_shell();
SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT);
- rrdhost_rdlock(host);
// for each chart
RRDSET *st;
rrdset_foreach_read(st, host) {
- if (filter && !simple_pattern_matches(filter, st->name))
+ if (filter && !simple_pattern_matches(filter, rrdset_name(st)))
continue;
NETDATA_DOUBLE total = 0.0;
char chart[SHELL_ELEMENT_MAX + 1];
- shell_name_copy(chart, st->name?st->name:st->id, SHELL_ELEMENT_MAX);
+ shell_name_copy(chart, st->name?rrdset_name(st):rrdset_id(st), SHELL_ELEMENT_MAX);
- buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", st->id, st->name);
+ buffer_sprintf(wb, "\n# chart: %s (name: %s)\n", rrdset_id(st), rrdset_name(st));
if(rrdset_is_available_for_viewers(st)) {
- rrdset_rdlock(st);
-
// for each dimension
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rd->collections_counter && !rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) {
char dimension[SHELL_ELEMENT_MAX + 1];
- shell_name_copy(dimension, rd->name?rd->name:rd->id, SHELL_ELEMENT_MAX);
+ shell_name_copy(dimension, rd->name?rrddim_name(rd):rrddim_id(rd), SHELL_ELEMENT_MAX);
NETDATA_DOUBLE n = rd->last_stored_value;
if(isnan(n) || isinf(n))
- buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, st->units);
+ buffer_sprintf(wb, "NETDATA_%s_%s=\"\" # %s\n", chart, dimension, rrdset_units(st));
else {
if(rd->multiplier < 0 || rd->divisor < 0) n = -n;
n = roundndd(n);
- if(!rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)) total += n;
- buffer_sprintf(wb, "NETDATA_%s_%s=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, dimension, n, st->units);
+ if(!rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN)) total += n;
+ buffer_sprintf(wb, "NETDATA_%s_%s=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, dimension, n, rrdset_units(st));
}
}
}
+ rrddim_foreach_done(rd);
total = roundndd(total);
- buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, total, st->units);
- rrdset_unlock(st);
+ buffer_sprintf(wb, "NETDATA_%s_VISIBLETOTAL=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, total, rrdset_units(st));
}
}
+ rrdset_foreach_done(st);
buffer_strcat(wb, "\n# NETDATA ALARMS RUNNING\n");
RRDCALC *rc;
- for(rc = host->alarms; rc ;rc = rc->next) {
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
if(!rc->rrdset) continue;
char chart[SHELL_ELEMENT_MAX + 1];
- shell_name_copy(chart, rc->rrdset->name?rc->rrdset->name:rc->rrdset->id, SHELL_ELEMENT_MAX);
+ shell_name_copy(chart, rc->rrdset->name?rrdset_name(rc->rrdset):rrdset_id(rc->rrdset), SHELL_ELEMENT_MAX);
char alarm[SHELL_ELEMENT_MAX + 1];
- shell_name_copy(alarm, rc->name, SHELL_ELEMENT_MAX);
+ shell_name_copy(alarm, rrdcalc_name(rc), SHELL_ELEMENT_MAX);
NETDATA_DOUBLE n = rc->value;
if(isnan(n) || isinf(n))
- buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rc->units);
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"\" # %s\n", chart, alarm, rrdcalc_units(rc));
else {
n = roundndd(n);
- buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rc->units);
+ buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_VALUE=\"" NETDATA_DOUBLE_FORMAT_ZERO "\" # %s\n", chart, alarm, n, rrdcalc_units(rc));
}
buffer_sprintf(wb, "NETDATA_ALARM_%s_%s_STATUS=\"%s\"\n", chart, alarm, rrdcalc_status2string(rc->status));
}
+ foreach_rrdcalc_in_rrdhost_done(rc);
- rrdhost_unlock(host);
simple_pattern_free(filter);
}
@@ -100,7 +98,6 @@ void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_
void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb) {
analytics_log_json();
SIMPLE_PATTERN *filter = simple_pattern_create(filter_string, NULL, SIMPLE_PATTERN_EXACT);
- rrdhost_rdlock(host);
buffer_strcat(wb, "{");
@@ -110,12 +107,10 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_s
// for each chart
RRDSET *st;
rrdset_foreach_read(st, host) {
- if (filter && !(simple_pattern_matches(filter, st->id) || simple_pattern_matches(filter, st->name)))
+ if (filter && !(simple_pattern_matches(filter, rrdset_id(st)) || simple_pattern_matches(filter, rrdset_name(st))))
continue;
if(rrdset_is_available_for_viewers(st)) {
- rrdset_rdlock(st);
-
buffer_sprintf(
wb,
"%s\n"
@@ -127,12 +122,12 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_s
"\t\t\"last_updated\": %"PRId64",\n"
"\t\t\"dimensions\": {",
chart_counter ? "," : "",
- st->id,
- st->name,
- st->family,
- st->context,
- st->units,
- (int64_t)rrdset_last_entry_t_nolock(st));
+ rrdset_id(st),
+ rrdset_name(st),
+ rrdset_family(st),
+ rrdset_context(st),
+ rrdset_units(st),
+ (int64_t)rrdset_last_entry_t(st));
chart_counter++;
dimension_counter = 0;
@@ -148,8 +143,8 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_s
"\t\t\t\t\"name\": \"%s\",\n"
"\t\t\t\t\"value\": ",
dimension_counter ? "," : "",
- rd->id,
- rd->name);
+ rrddim_id(rd),
+ rrddim_name(rd));
if(isnan(rd->last_stored_value))
buffer_strcat(wb, "null");
@@ -161,14 +156,14 @@ void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_s
dimension_counter++;
}
}
+ rrddim_foreach_done(rd);
buffer_strcat(wb, "\n\t\t}\n\t}");
- rrdset_unlock(st);
}
}
+ rrdset_foreach_done(st);
buffer_strcat(wb, "\n}");
- rrdhost_unlock(host);
simple_pattern_free(filter);
}
diff --git a/web/api/exporters/shell/allmetrics_shell.h b/web/api/exporters/shell/allmetrics_shell.h
index 1ee9aa717..d6598e08d 100644
--- a/web/api/exporters/shell/allmetrics_shell.h
+++ b/web/api/exporters/shell/allmetrics_shell.h
@@ -15,7 +15,7 @@
#define ALLMETRICS_JSON 3
#define ALLMETRICS_PROMETHEUS_ALL_HOSTS 4
-extern void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb);
-extern void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb);
+void rrd_stats_api_v1_charts_allmetrics_json(RRDHOST *host, const char *filter_string, BUFFER *wb);
+void rrd_stats_api_v1_charts_allmetrics_shell(RRDHOST *host, const char *filter_string, BUFFER *wb);
#endif //NETDATA_API_ALLMETRICS_SHELL_H
diff --git a/web/api/formatters/charts2json.c b/web/api/formatters/charts2json.c
index 4325b6530..1fc20b493 100644
--- a/web/api/formatters/charts2json.c
+++ b/web/api/formatters/charts2json.c
@@ -57,11 +57,11 @@ void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived
",\n\t\"memory_mode\": \"%s\""
",\n\t\"custom_info\": \"%s\""
",\n\t\"charts\": {"
- , host->hostname
- , host->program_version
+ , rrdhost_hostname(host)
+ , rrdhost_program_version(host)
, get_release_channel()
- , host->os
- , host->timezone
+ , rrdhost_os(host)
+ , rrdhost_timezone(host)
, host->rrd_update_every
, host->rrd_history_entries
, rrd_memory_mode_name(host->rrd_memory_mode)
@@ -69,12 +69,11 @@ void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived
);
c = 0;
- rrdhost_rdlock(host);
rrdset_foreach_read(st, host) {
if ((!show_archived && rrdset_is_available_for_viewers(st)) || (show_archived && rrdset_is_archived(st))) {
if(c) buffer_strcat(wb, ",");
buffer_strcat(wb, "\n\t\t\"");
- buffer_strcat(wb, st->id);
+ buffer_strcat(wb, rrdset_id(st));
buffer_strcat(wb, "\": ");
rrdset2json(st, wb, &dimensions, &memory, skip_volatile);
@@ -82,13 +81,14 @@ void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived
st->last_accessed_time = now;
}
}
+ rrdset_foreach_done(st);
RRDCALC *rc;
- for(rc = host->alarms; rc ; rc = rc->next) {
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
if(rc->rrdset)
alarms++;
}
- rrdhost_unlock(host);
+ foreach_rrdcalc_in_rrdhost_done(rc);
buffer_sprintf(wb
, "\n\t}"
@@ -117,7 +117,7 @@ void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived
"\n\t\t\t\"hostname\": \"%s\""
"\n\t\t}"
, (found > 0) ? "," : ""
- , h->hostname
+ , rrdhost_hostname(h)
);
found++;
@@ -131,7 +131,7 @@ void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived
, "\n\t\t{"
"\n\t\t\t\"hostname\": \"%s\""
"\n\t\t}"
- , host->hostname
+ , rrdhost_hostname(host)
);
}
@@ -141,8 +141,8 @@ void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived
// generate collectors list for the api/v1/info call
struct collector {
- char *plugin;
- char *module;
+ const char *plugin;
+ const char *module;
};
struct array_printer {
@@ -150,9 +150,7 @@ struct array_printer {
BUFFER *wb;
};
-static int print_collector_callback(const char *name, void *entry, void *data) {
- (void)name;
-
+static int print_collector_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) {
struct array_printer *ap = (struct array_printer *)data;
BUFFER *wb = ap->wb;
struct collector *col=(struct collector *) entry;
@@ -167,24 +165,23 @@ static int print_collector_callback(const char *name, void *entry, void *data) {
}
void chartcollectors2json(RRDHOST *host, BUFFER *wb) {
- DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED);
RRDSET *st;
char name[500];
time_t now = now_realtime_sec();
- rrdhost_rdlock(host);
rrdset_foreach_read(st, host) {
if (rrdset_is_available_for_viewers(st)) {
struct collector col = {
- .plugin = st->plugin_name ? st->plugin_name : "",
- .module = st->module_name ? st->module_name : ""
+ .plugin = rrdset_plugin_name(st),
+ .module = rrdset_module_name(st)
};
sprintf(name, "%s:%s", col.plugin, col.module);
dictionary_set(dict, name, &col, sizeof(struct collector));
st->last_accessed_time = now;
}
}
- rrdhost_unlock(host);
+ rrdset_foreach_done(st);
struct array_printer ap = {
.c = 0,
.wb = wb
diff --git a/web/api/formatters/charts2json.h b/web/api/formatters/charts2json.h
index 2d8cce310..d4b04af58 100644
--- a/web/api/formatters/charts2json.h
+++ b/web/api/formatters/charts2json.h
@@ -5,8 +5,8 @@
#include "rrd2json.h"
-extern void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived);
-extern void chartcollectors2json(RRDHOST *host, BUFFER *wb);
-extern const char* get_release_channel();
+void charts2json(RRDHOST *host, BUFFER *wb, int skip_volatile, int show_archived);
+void chartcollectors2json(RRDHOST *host, BUFFER *wb);
+const char* get_release_channel();
#endif //NETDATA_API_FORMATTER_CHARTS2JSON_H
diff --git a/web/api/formatters/csv/csv.c b/web/api/formatters/csv/csv.c
index 6d87ca374..603a17169 100644
--- a/web/api/formatters/csv/csv.c
+++ b/web/api/formatters/csv/csv.c
@@ -3,15 +3,14 @@
#include "libnetdata/libnetdata.h"
#include "csv.h"
-void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines, RRDDIM *temp_rd) {
- rrdset_check_rdlock(r->st);
-
+void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines) {
//info("RRD2CSV(): %s: BEGIN", r->st->id);
+ QUERY_TARGET *qt = r->internal.qt;
long c, i;
- RRDDIM *d;
+ const long used = qt->query.used;
// print the csv header
- for(c = 0, i = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ for(c = 0, i = 0; c < used ; c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
@@ -23,7 +22,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
}
buffer_strcat(wb, separator);
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
- buffer_strcat(wb, d->name);
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
if(options & RRDR_OPTION_LABEL_QUOTES) buffer_strcat(wb, "\"");
i++;
}
@@ -31,7 +30,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
if(format == DATASOURCE_CSV_MARKDOWN) {
// print the --- line after header
- for(c = 0, i = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ for(c = 0, i = 0; c < used ;c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
@@ -89,7 +88,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
int set_min_max = 0;
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
- for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ for(c = 0; c < used ;c++) {
NETDATA_DOUBLE n = cn[c];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
@@ -103,7 +102,7 @@ void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const
}
// for each dimension
- for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d && c < r->d ;c++, d = d->next) {
+ for(c = 0; c < used ;c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
diff --git a/web/api/formatters/csv/csv.h b/web/api/formatters/csv/csv.h
index cf6020de4..666d4c660 100644
--- a/web/api/formatters/csv/csv.h
+++ b/web/api/formatters/csv/csv.h
@@ -5,7 +5,7 @@
#include "web/api/queries/rrdr.h"
-extern void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines, RRDDIM *temp_rd);
+void rrdr2csv(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, const char *startline, const char *separator, const char *endline, const char *betweenlines);
#include "../rrd2json.h"
diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c
index 6f07b9aa4..608150cba 100644
--- a/web/api/formatters/json/json.c
+++ b/web/api/formatters/json/json.c
@@ -5,15 +5,7 @@
#define JSON_DATES_JS 1
#define JSON_DATES_TIMESTAMP 2
-void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct context_param *context_param_list)
-{
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
-
- int should_lock = (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE));
-
- if (should_lock)
- rrdset_check_rdlock(r->st);
-
+void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
//info("RRD2JSON(): %s: BEGIN", r->st->id);
int row_annotations = 0, dates, dates_with_new = 0;
char kq[2] = "", // key quote
@@ -112,21 +104,21 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
// -------------------------------------------------------------------------
// print the JSON header
+ QUERY_TARGET *qt = r->internal.qt;
long c, i;
- RRDDIM *rd;
+ const long used = qt->query.used;
// print the header lines
- for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0, i = 0; c < used ; c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
buffer_fast_strcat(wb, pre_label, pre_label_len);
- buffer_strcat(wb, rd->name);
-// buffer_strcat(wb, ".");
-// buffer_strcat(wb, rd->rrdset->name);
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
buffer_fast_strcat(wb, post_label, post_label_len);
i++;
}
+
if(!i) {
buffer_fast_strcat(wb, pre_label, pre_label_len);
buffer_fast_strcat(wb, "no data", 7);
@@ -134,7 +126,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
}
size_t total_number_of_dimensions = i;
- // print the begin of row data
+ // print the beginning of row data
buffer_strcat(wb, data_begin);
// if all dimensions are hidden, print a null
@@ -187,7 +179,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
if(unlikely(row_annotations)) {
// google supports one annotation per row
int annotation_found = 0;
- for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd ;c++, rd = rd->next) {
+ for(c = 0; c < used ; c++) {
if(unlikely(!(r->od[c] & RRDR_DIMENSION_SELECTED))) continue;
if(unlikely(co[c] & RRDR_VALUE_RESET)) {
@@ -222,7 +214,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
int set_min_max = 0;
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
- for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0; c < used ;c++) {
NETDATA_DOUBLE n;
if(unlikely(options & RRDR_OPTION_INTERNAL_AR))
n = ar[c];
@@ -240,7 +232,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
}
// for each dimension
- for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0; c < used ;c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
@@ -253,7 +245,7 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct
buffer_fast_strcat(wb, pre_value, pre_value_len);
if(unlikely( options & RRDR_OPTION_OBJECTSROWS ))
- buffer_sprintf(wb, "%s%s%s: ", kq, rd->name, kq);
+ buffer_sprintf(wb, "%s%s%s: ", kq, string2str(qt->query.array[c].dimension.name), kq);
if(co[c] & RRDR_VALUE_EMPTY && !(options & RRDR_OPTION_INTERNAL_AR)) {
if(unlikely(options & RRDR_OPTION_NULL2ZERO))
diff --git a/web/api/formatters/json/json.h b/web/api/formatters/json/json.h
index 5c4e11371..fb59e5c9a 100644
--- a/web/api/formatters/json/json.h
+++ b/web/api/formatters/json/json.h
@@ -5,6 +5,6 @@
#include "../rrd2json.h"
-extern void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable, struct context_param *context_param_list);
+void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable);
#endif //NETDATA_API_FORMATTER_JSON_H
diff --git a/web/api/formatters/json_wrapper.c b/web/api/formatters/json_wrapper.c
index 04cace2fb..8b9b7522c 100644
--- a/web/api/formatters/json_wrapper.c
+++ b/web/api/formatters/json_wrapper.c
@@ -7,9 +7,7 @@ struct value_output {
BUFFER *wb;
};
-static int value_list_output(const char *name, void *entry, void *data) {
- (void)name;
-
+static int value_list_output_callback(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data) {
struct value_output *ap = (struct value_output *)data;
BUFFER *wb = ap->wb;
char *output = (char *) entry;
@@ -35,25 +33,17 @@ static int fill_formatted_callback(const char *name, const char *value, RRDLABEL
}
void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
- RRDR_GROUPING group_method, QUERY_PARAMS *rrdset_query_data)
+ RRDR_GROUPING group_method)
{
- struct context_param *context_param_list = rrdset_query_data->context_param_list;
- char *chart_label_key = rrdset_query_data->chart_label_key;
-
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
- int should_lock = (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE));
- uint8_t context_mode = (!context_param_list || (context_param_list->flags & CONTEXT_FLAGS_CONTEXT));
-
- if (should_lock)
- rrdset_check_rdlock(r->st);
+ QUERY_TARGET *qt = r->internal.qt;
long rows = rrdr_rows(r);
long c, i;
- RRDDIM *rd;
+ const long query_used = qt->query.used;
//info("JSONWRAPPER(): %s: BEGIN", r->st->id);
char kq[2] = "", // key quote
- sq[2] = ""; // string quote
+ sq[2] = ""; // string quote
if( options & RRDR_OPTION_GOOGLE_JSON ) {
kq[0] = '\0';
@@ -64,52 +54,47 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
sq[0] = '"';
}
- if (should_lock)
- rrdset_rdlock(r->st);
buffer_sprintf(wb, "{\n"
" %sapi%s: 1,\n"
" %sid%s: %s%s%s,\n"
" %sname%s: %s%s%s,\n"
- " %sview_update_every%s: %d,\n"
- " %supdate_every%s: %d,\n"
- " %sfirst_entry%s: %u,\n"
- " %slast_entry%s: %u,\n"
- " %sbefore%s: %u,\n"
- " %safter%s: %u,\n"
+ " %sview_update_every%s: %lld,\n"
+ " %supdate_every%s: %lld,\n"
+ " %sfirst_entry%s: %lld,\n"
+ " %slast_entry%s: %lld,\n"
+ " %sbefore%s: %lld,\n"
+ " %safter%s: %lld,\n"
" %sgroup%s: %s%s%s,\n"
" %soptions%s: %s"
, kq, kq
- , kq, kq, sq, context_mode && temp_rd?r->st->context:r->st->id, sq
- , kq, kq, sq, context_mode && temp_rd?r->st->context:r->st->name, sq
- , kq, kq, r->update_every
- , kq, kq, r->st->update_every
- , kq, kq, (uint32_t) (context_param_list ? context_param_list->first_entry_t : rrdset_first_entry_t_nolock(r->st))
- , kq, kq, (uint32_t) (context_param_list ? context_param_list->last_entry_t : rrdset_last_entry_t_nolock(r->st))
- , kq, kq, (uint32_t)r->before
- , kq, kq, (uint32_t)r->after
+ , kq, kq, sq, qt->id, sq
+ , kq, kq, sq, qt->id, sq
+ , kq, kq, (long long)r->update_every
+ , kq, kq, (long long)qt->db.minimum_latest_update_every
+ , kq, kq, (long long)qt->db.first_time_t
+ , kq, kq, (long long)qt->db.last_time_t
+ , kq, kq, (long long)r->before
+ , kq, kq, (long long)r->after
, kq, kq, sq, web_client_api_request_v1_data_group_to_string(group_method), sq
, kq, kq, sq);
- web_client_api_request_v1_data_options_to_string(wb, r->internal.query_options);
+ web_client_api_request_v1_data_options_to_buffer(wb, r->internal.query_options);
buffer_sprintf(wb, "%s,\n %sdimension_names%s: [", sq, kq, kq);
- if (should_lock)
- rrdset_unlock(r->st);
-
- for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0, i = 0; c < query_used ; c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
if(i) buffer_strcat(wb, ", ");
buffer_strcat(wb, sq);
- buffer_strcat(wb, rd->name);
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.name));
buffer_strcat(wb, sq);
i++;
}
if(!i) {
#ifdef NETDATA_INTERNAL_CHECKS
- error("RRDR is empty for %s (RRDR has %d dimensions, options is 0x%08x)", r->st->id, r->d, options);
+ error("QUERY: '%s', RRDR is empty, %zu dimensions, options is 0x%08x", qt->id, r->d, options);
#endif
rows = 0;
buffer_strcat(wb, sq);
@@ -121,13 +106,13 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
" %sdimension_ids%s: ["
, kq, kq);
- for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0, i = 0; c < query_used ; c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
if(i) buffer_strcat(wb, ", ");
buffer_strcat(wb, sq);
- buffer_strcat(wb, rd->id);
+ buffer_strcat(wb, string2str(qt->query.array[c].dimension.id));
buffer_strcat(wb, sq);
i++;
}
@@ -139,7 +124,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
}
buffer_strcat(wb, "],\n");
- if (rrdset_query_data->show_dimensions) {
+ if (r->internal.query_options & RRDR_OPTION_ALL_DIMENSIONS) {
buffer_sprintf(wb, " %sfull_dimension_list%s: [", kq, kq);
char name[RRD_ID_LENGTH_MAX * 2 + 2];
@@ -147,58 +132,97 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
struct value_output co = {.c = 0, .wb = wb};
- DICTIONARY *dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
- for (i = 0, rd = temp_rd ? temp_rd : r->st->dimensions; rd; rd = rd->next) {
- snprintfz(name, RRD_ID_LENGTH_MAX * 2, "%s:%s", rd->id, rd->name);
- int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]", rd->id, rd->name);
- dictionary_set(dict, name, output, len+1);
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (c = 0; c < (long)qt->metrics.used ;c++) {
+ snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
+ rrdmetric_acquired_id(qt->metrics.array[c]),
+ rrdmetric_acquired_name(qt->metrics.array[c]));
+
+ int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]",
+ rrdmetric_acquired_id(qt->metrics.array[c]),
+ rrdmetric_acquired_name(qt->metrics.array[c]));
+
+ dictionary_set(dict, name, output, len + 1);
}
- dictionary_walkthrough_read(dict, value_list_output, &co);
+ dictionary_walkthrough_read(dict, value_list_output_callback, &co);
dictionary_destroy(dict);
co.c = 0;
buffer_sprintf(wb, "],\n %sfull_chart_list%s: [", kq, kq);
- dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
- for (i = 0, rd = temp_rd ? temp_rd : r->st->dimensions; rd; rd = rd->next) {
- int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]", rd->rrdset->id, rd->rrdset->name);
- snprintfz(name, RRD_ID_LENGTH_MAX * 2, "%s:%s", rd->rrdset->id, rd->rrdset->name);
+ dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (c = 0; c < (long)qt->instances.used ; c++) {
+ RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c];
+
+ snprintfz(name, RRD_ID_LENGTH_MAX * 2 + 1, "%s:%s",
+ rrdinstance_acquired_id(ria),
+ rrdinstance_acquired_name(ria));
+
+ int len = snprintfz(output, RRD_ID_LENGTH_MAX * 2 + 7, "[\"%s\",\"%s\"]",
+ rrdinstance_acquired_id(ria),
+ rrdinstance_acquired_name(ria));
+
dictionary_set(dict, name, output, len + 1);
}
-
- dictionary_walkthrough_read(dict, value_list_output, &co);
+ dictionary_walkthrough_read(dict, value_list_output_callback, &co);
dictionary_destroy(dict);
- RRDSET *st;
co.c = 0;
buffer_sprintf(wb, "],\n %sfull_chart_labels%s: [", kq, kq);
- dict = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
- for (i = 0, rd = temp_rd ? temp_rd : r->st->dimensions; rd; rd = rd->next) {
- st = rd->rrdset;
- if (st->state && st->state->chart_labels)
- rrdlabels_walkthrough_read(st->state->chart_labels, fill_formatted_callback, dict);
+ dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ for (c = 0; c < (long)qt->instances.used ; c++) {
+ RRDINSTANCE_ACQUIRED *ria = qt->instances.array[c];
+ rrdlabels_walkthrough_read(rrdinstance_acquired_labels(ria), fill_formatted_callback, dict);
}
- dictionary_walkthrough_read(dict, value_list_output, &co);
+ dictionary_walkthrough_read(dict, value_list_output_callback, &co);
dictionary_destroy(dict);
buffer_strcat(wb, "],\n");
}
- // Composite charts
- if (context_mode && temp_rd) {
+ // functions
+ {
+ DICTIONARY *funcs = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ RRDINSTANCE_ACQUIRED *ria = NULL;
+ for (c = 0; c < query_used ; c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+ if(qm->link.ria == ria)
+ continue;
+
+ ria = qm->link.ria;
+ chart_functions_to_dict(rrdinstance_acquired_functions(ria), funcs);
+ }
+
+ buffer_sprintf(wb, " %sfunctions%s: [", kq, kq);
+ void *t; (void)t;
+ dfe_start_read(funcs, t) {
+ const char *comma = "";
+ if(t_dfe.counter) comma = ", ";
+ buffer_sprintf(wb, "%s%s%s%s", comma, sq, t_dfe.name, sq);
+ }
+ dfe_done(t);
+ dictionary_destroy(funcs);
+ buffer_strcat(wb, "],\n");
+ }
+
+ // context query
+ if (!qt->request.st) {
buffer_sprintf(
wb,
" %schart_ids%s: [",
kq, kq);
- for (c = 0, i = 0, rd = temp_rd ; rd && c < r->d; c++, rd = rd->next) {
+ for (c = 0, i = 0; c < query_used; c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+
if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN))
continue;
+
if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
continue;
if (i)
buffer_strcat(wb, ", ");
buffer_strcat(wb, sq);
- buffer_strcat(wb, rd->rrdset->id);
+ buffer_strcat(wb, string2str(qm->chart.id));
buffer_strcat(wb, sq);
i++;
}
@@ -209,29 +233,29 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
buffer_strcat(wb, sq);
}
buffer_strcat(wb, "],\n");
- if (chart_label_key) {
+ if (qt->instances.chart_label_key_pattern) {
buffer_sprintf(wb, " %schart_labels%s: { ", kq, kq);
- SIMPLE_PATTERN *pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
- SIMPLE_PATTERN *original_pattern = pattern;
+ SIMPLE_PATTERN *pattern = qt->instances.chart_label_key_pattern;
char *label_key = NULL;
int keys = 0;
while (pattern && (label_key = simple_pattern_iterate(&pattern))) {
-
if (keys)
buffer_strcat(wb, ", ");
buffer_sprintf(wb, "%s%s%s : [", kq, label_key, kq);
keys++;
- for (c = 0, i = 0, rd = temp_rd; rd && c < r->d; c++, rd = rd->next) {
+ for (c = 0, i = 0; c < query_used; c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+
if (unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN))
continue;
if (unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO)))
continue;
+
if (i)
buffer_strcat(wb, ", ");
-
- rrdlabels_get_value_to_buffer_or_null(rd->rrdset->state->chart_labels, wb, label_key, sq, "null");
+ rrdlabels_get_value_to_buffer_or_null(rrdinstance_acquired_labels(qm->link.ria), wb, label_key, sq, "null");
i++;
}
if (!i) {
@@ -243,33 +267,26 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
buffer_strcat(wb, "]");
}
buffer_strcat(wb, "},\n");
- simple_pattern_free(original_pattern);
}
}
buffer_sprintf(wb, " %slatest_values%s: ["
, kq, kq);
- for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0, i = 0; c < query_used ;c++) {
+ QUERY_METRIC *qm = &qt->query.array[c];
+
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
if(i) buffer_strcat(wb, ", ");
i++;
- NETDATA_DOUBLE value = rd->last_stored_value;
+ NETDATA_DOUBLE value = rrdmetric_acquired_last_stored_value(qm->link.rma);
if (NAN == value)
buffer_strcat(wb, "null");
else
buffer_rrd_value(wb, value);
- /*
- storage_number n = rd->values[rrdset_last_slot(r->st)];
-
- if(!does_storage_number_exist(n))
- buffer_strcat(wb, "null");
- else
- buffer_rrd_value(wb, unpack_storage_number(n));
- */
}
if(!i) {
rows = 0;
@@ -286,7 +303,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
- for(c = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0; c < query_used ;c++) {
NETDATA_DOUBLE *cn = &r->v[ (rrdr_rows(r) - 1) * r->d ];
NETDATA_DOUBLE n = cn[c];
@@ -299,7 +316,7 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
if(total == 0) total = 1;
}
- for(c = 0, i = 0, rd = temp_rd?temp_rd:r->st->dimensions; rd && c < r->d ;c++, rd = rd->next) {
+ for(c = 0, i = 0; c < query_used ;c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
@@ -349,16 +366,11 @@ void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS
, kq, kq
);
- for(int tier = 0; tier < storage_tiers ; tier++)
+ for(size_t tier = 0; tier < storage_tiers ; tier++)
buffer_sprintf(wb, "%s%zu", tier>0?", ":"", r->internal.tier_points_read[tier]);
buffer_strcat(wb, " ]");
- if((options & RRDR_OPTION_CUSTOM_VARS) && (options & RRDR_OPTION_JSON_WRAP)) {
- buffer_sprintf(wb, ",\n %schart_variables%s: ", kq, kq);
- health_api_v1_chart_custom_variables2json(r->st, wb);
- }
-
buffer_sprintf(wb, ",\n %sresult%s: ", kq, kq);
if(string_value) buffer_strcat(wb, sq);
diff --git a/web/api/formatters/json_wrapper.h b/web/api/formatters/json_wrapper.h
index bfadc883e..91c1475c5 100644
--- a/web/api/formatters/json_wrapper.h
+++ b/web/api/formatters/json_wrapper.h
@@ -7,9 +7,9 @@
#include "web/api/queries/query.h"
-extern void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
- RRDR_GROUPING group_method, QUERY_PARAMS *query_params);
-extern void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
-extern void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
+void rrdr_json_wrapper_begin(RRDR *r, BUFFER *wb, uint32_t format, RRDR_OPTIONS options, int string_value,
+ RRDR_GROUPING group_method);
+void rrdr_json_wrapper_anomaly_rates(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
+void rrdr_json_wrapper_end(RRDR *r, BUFFER *wb, uint32_t format, uint32_t options, int string_value);
#endif //NETDATA_API_FORMATTER_JSON_WRAPPER_H
diff --git a/web/api/formatters/rrd2json.c b/web/api/formatters/rrd2json.c
index 7aa478d95..8bf547192 100644
--- a/web/api/formatters/rrd2json.c
+++ b/web/api/formatters/rrd2json.c
@@ -3,129 +3,6 @@
#include "web/api/web_api_v1.h"
#include "database/storage_engine.h"
-static inline void free_single_rrdrim(ONEWAYALLOC *owa, RRDDIM *temp_rd, int archive_mode)
-{
- if (unlikely(!temp_rd))
- return;
-
- onewayalloc_freez(owa, (char *)temp_rd->id);
-
- if (unlikely(archive_mode)) {
- temp_rd->rrdset->counter--;
- if (!temp_rd->rrdset->counter) {
- onewayalloc_freez(owa, (char *)temp_rd->rrdset->name);
- onewayalloc_freez(owa, temp_rd->rrdset->context);
- onewayalloc_freez(owa, temp_rd->rrdset);
- }
- }
-
- for(int tier = 0; tier < storage_tiers ;tier++) {
- if(!temp_rd->tiers[tier]) continue;
-
- if(archive_mode) {
- STORAGE_ENGINE *eng = storage_engine_get(temp_rd->tiers[tier]->mode);
- if (eng)
- eng->api.free(temp_rd->tiers[tier]->db_metric_handle);
- }
-
- onewayalloc_freez(owa, temp_rd->tiers[tier]);
- }
-
- onewayalloc_freez(owa, temp_rd);
-}
-
-static inline void free_rrddim_list(ONEWAYALLOC *owa, RRDDIM *temp_rd, int archive_mode)
-{
- if (unlikely(!temp_rd))
- return;
-
- RRDDIM *t;
- while (temp_rd) {
- t = temp_rd->next;
- free_single_rrdrim(owa, temp_rd, archive_mode);
- temp_rd = t;
- }
-}
-
-void free_context_param_list(ONEWAYALLOC *owa, struct context_param **param_list)
-{
- if (unlikely(!param_list || !*param_list))
- return;
-
- free_rrddim_list(owa, ((*param_list)->rd), (*param_list)->flags & CONTEXT_FLAGS_ARCHIVE);
- onewayalloc_freez(owa, (*param_list));
- *param_list = NULL;
-}
-
-void rebuild_context_param_list(ONEWAYALLOC *owa, struct context_param *context_param_list, time_t after_requested)
-{
- RRDDIM *temp_rd = context_param_list->rd;
- RRDDIM *new_rd_list = NULL, *t;
- int is_archived = (context_param_list->flags & CONTEXT_FLAGS_ARCHIVE);
-
- RRDSET *st = temp_rd->rrdset;
- RRDSET *last_st = st;
- time_t last_entry_t = is_archived ? st->last_entry_t : rrdset_last_entry_t(st);
- time_t last_last_entry_t = last_entry_t;
- while (temp_rd) {
- t = temp_rd->next;
-
- st = temp_rd->rrdset;
- if (st == last_st) {
- last_entry_t = last_last_entry_t;
- }else {
- last_entry_t = is_archived ? st->last_entry_t : rrdset_last_entry_t(st);
- last_last_entry_t = last_entry_t;
- last_st = st;
- }
-
- if (last_entry_t >= after_requested) {
- temp_rd->next = new_rd_list;
- new_rd_list = temp_rd;
- } else
- free_single_rrdrim(owa, temp_rd, is_archived);
- temp_rd = t;
- }
- context_param_list->rd = new_rd_list;
-};
-
-void build_context_param_list(ONEWAYALLOC *owa, struct context_param **param_list, RRDSET *st)
-{
- if (unlikely(!param_list || !st))
- return;
-
- if (unlikely(!(*param_list))) {
- *param_list = onewayalloc_mallocz(owa, sizeof(struct context_param));
- (*param_list)->first_entry_t = LONG_MAX;
- (*param_list)->last_entry_t = 0;
- (*param_list)->flags = CONTEXT_FLAGS_CONTEXT;
- (*param_list)->rd = NULL;
- }
-
- RRDDIM *rd1;
- st->last_accessed_time = now_realtime_sec();
- rrdset_rdlock(st);
-
- (*param_list)->first_entry_t = MIN((*param_list)->first_entry_t, rrdset_first_entry_t_nolock(st));
- (*param_list)->last_entry_t = MAX((*param_list)->last_entry_t, rrdset_last_entry_t_nolock(st));
-
- rrddim_foreach_read(rd1, st) {
- RRDDIM *rd = onewayalloc_memdupz(owa, rd1, sizeof(RRDDIM));
- rd->id = onewayalloc_strdupz(owa, rd1->id);
- rd->name = onewayalloc_strdupz(owa, rd1->name);
- for(int tier = 0; tier < storage_tiers ;tier++) {
- if(rd1->tiers[tier])
- rd->tiers[tier] = onewayalloc_memdupz(owa, rd1->tiers[tier], sizeof(*rd->tiers[tier]));
- else
- rd->tiers[tier] = NULL;
- }
- rd->next = (*param_list)->rd;
- (*param_list)->rd = rd;
- }
-
- rrdset_unlock(st);
-}
-
void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb) {
rrdset2json(st, wb, NULL, NULL, 0);
}
@@ -183,12 +60,12 @@ int rrdset2value_api_v1(
, BUFFER *wb
, NETDATA_DOUBLE *n
, const char *dimensions
- , long points
- , long long after
- , long long before
- , int group_method
+ , size_t points
+ , time_t after
+ , time_t before
+ , RRDR_GROUPING group_method
, const char *group_options
- , long group_time
+ , time_t resampling_time
, uint32_t options
, time_t *db_after
, time_t *db_before
@@ -197,16 +74,27 @@ int rrdset2value_api_v1(
, size_t *result_points_generated
, int *value_is_null
, NETDATA_DOUBLE *anomaly_rate
- , int timeout
- , int tier
+ , time_t timeout
+ , size_t tier
+ , QUERY_SOURCE query_source
) {
int ret = HTTP_RESP_INTERNAL_SERVER_ERROR;
ONEWAYALLOC *owa = onewayalloc_create(0);
-
- RRDR *r = rrd2rrdr(owa, st, points, after, before,
- group_method, group_time, options, dimensions, NULL,
- group_options, timeout, tier);
+ RRDR *r = rrd2rrdr_legacy(
+ owa,
+ st,
+ points,
+ after,
+ before,
+ group_method,
+ resampling_time,
+ options,
+ dimensions,
+ group_options,
+ timeout,
+ tier,
+ query_source);
if(!r) {
if(value_is_null) *value_is_null = 1;
@@ -218,7 +106,7 @@ int rrdset2value_api_v1(
*db_points_read += r->internal.db_points_read;
if(db_points_per_tier) {
- for(int t = 0; t < storage_tiers ;t++)
+ for(size_t t = 0; t < storage_tiers ;t++)
db_points_per_tier[t] += r->internal.tier_points_read[t];
}
@@ -244,50 +132,19 @@ int rrdset2value_api_v1(
if(db_after) *db_after = r->after;
if(db_before) *db_before = r->before;
- long i = (!(options & RRDR_OPTION_REVERSED))?rrdr_rows(r) - 1:0;
- *n = rrdr2value(r, i, options, value_is_null, anomaly_rate, NULL);
+ long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0;
+ *n = rrdr2value(r, i, options, value_is_null, anomaly_rate);
ret = HTTP_RESP_OK;
cleanup:
- if(r) rrdr_free(owa, r);
+ rrdr_free(owa, r);
onewayalloc_destroy(owa);
return ret;
}
-int rrdset2anything_api_v1(
- ONEWAYALLOC *owa
- , RRDSET *st
- , QUERY_PARAMS *query_params
- , BUFFER *dimensions
- , uint32_t format
- , long points
- , long long after
- , long long before
- , int group_method
- , const char *group_options
- , long group_time
- , uint32_t options
- , time_t *latest_timestamp
- , int tier
-)
-{
- BUFFER *wb = query_params->wb;
- if (query_params->context_param_list && !(query_params->context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
- st->last_accessed_time = now_realtime_sec();
-
- RRDR *r = rrd2rrdr(
- owa,
- st,
- points,
- after,
- before,
- group_method,
- group_time,
- options,
- dimensions ? buffer_tostring(dimensions) : NULL,
- query_params->context_param_list,
- group_options,
- query_params->timeout, tier);
+int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, QUERY_TARGET *qt, time_t *latest_timestamp) {
+
+ RRDR *r = rrd2rrdr(owa, qt);
if(!r) {
buffer_strcat(wb, "Cannot generate output with these parameters on this chart.");
return HTTP_RESP_INTERNAL_SERVER_ERROR;
@@ -298,11 +155,6 @@ int rrdset2anything_api_v1(
return HTTP_RESP_BACKEND_FETCH_FAILED;
}
- if (st->state && st->state->is_ar_chart)
- ml_process_rrdr(r, query_params->max_anomaly_rates);
-
- RRDDIM *temp_rd = query_params->context_param_list ? query_params->context_param_list->rd : NULL;
-
if(r->result_options & RRDR_RESULT_OPTION_RELATIVE)
buffer_no_cacheable(wb);
else if(r->result_options & RRDR_RESULT_OPTION_ABSOLUTE)
@@ -311,85 +163,89 @@ int rrdset2anything_api_v1(
if(latest_timestamp && rrdr_rows(r) > 0)
*latest_timestamp = r->before;
+ DATASOURCE_FORMAT format = qt->request.format;
+ RRDR_OPTIONS options = qt->request.options;
+ RRDR_GROUPING group_method = qt->request.group_method;
+
switch(format) {
case DATASOURCE_SSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
- rrdr2ssv(r, wb, options, "", " ", "", temp_rd);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2ssv(r, wb, options, "", " ", "");
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
else {
wb->contenttype = CT_TEXT_PLAIN;
- rrdr2ssv(r, wb, options, "", " ", "", temp_rd);
+ rrdr2ssv(r, wb, options, "", " ", "");
}
break;
case DATASOURCE_SSV_COMMA:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
- rrdr2ssv(r, wb, options, "", ",", "", temp_rd);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2ssv(r, wb, options, "", ",", "");
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
else {
wb->contenttype = CT_TEXT_PLAIN;
- rrdr2ssv(r, wb, options, "", ",", "", temp_rd);
+ rrdr2ssv(r, wb, options, "", ",", "");
}
break;
case DATASOURCE_JS_ARRAY:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
- rrdr2ssv(r, wb, options, "[", ",", "]", temp_rd);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
+ rrdr2ssv(r, wb, options, "[", ",", "]");
rrdr_json_wrapper_end(r, wb, format, options, 0);
}
else {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr2ssv(r, wb, options, "[", ",", "]", temp_rd);
+ rrdr2ssv(r, wb, options, "[", ",", "]");
}
break;
case DATASOURCE_CSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
- rrdr2csv(r, wb, format, options, "", ",", "\\n", "", temp_rd);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2csv(r, wb, format, options, "", ",", "\\n", "");
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
else {
wb->contenttype = CT_TEXT_PLAIN;
- rrdr2csv(r, wb, format, options, "", ",", "\r\n", "", temp_rd);
+ rrdr2csv(r, wb, format, options, "", ",", "\r\n", "");
}
break;
case DATASOURCE_CSV_MARKDOWN:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
- rrdr2csv(r, wb, format, options, "", "|", "\\n", "", temp_rd);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2csv(r, wb, format, options, "", "|", "\\n", "");
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
else {
wb->contenttype = CT_TEXT_PLAIN;
- rrdr2csv(r, wb, format, options, "", "|", "\r\n", "", temp_rd);
+ rrdr2csv(r, wb, format, options, "", "|", "\r\n", "");
}
break;
case DATASOURCE_CSV_JSON_ARRAY:
wb->contenttype = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP) {
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
buffer_strcat(wb, "[\n");
- rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n", temp_rd);
+ rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n");
buffer_strcat(wb, "\n]");
rrdr_json_wrapper_end(r, wb, format, options, 0);
}
else {
wb->contenttype = CT_APPLICATION_JSON;
buffer_strcat(wb, "[\n");
- rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n", temp_rd);
+ rrdr2csv(r, wb, format, options + RRDR_OPTION_LABEL_QUOTES, "[", ",", "]", ",\n");
buffer_strcat(wb, "\n]");
}
break;
@@ -397,29 +253,29 @@ int rrdset2anything_api_v1(
case DATASOURCE_TSV:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
- rrdr2csv(r, wb, format, options, "", "\t", "\\n", "", temp_rd);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
+ rrdr2csv(r, wb, format, options, "", "\t", "\\n", "");
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
else {
wb->contenttype = CT_TEXT_PLAIN;
- rrdr2csv(r, wb, format, options, "", "\t", "\r\n", "", temp_rd);
+ rrdr2csv(r, wb, format, options, "", "\t", "\r\n", "");
}
break;
case DATASOURCE_HTML:
if(options & RRDR_OPTION_JSON_WRAP) {
wb->contenttype = CT_APPLICATION_JSON;
- rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 1, group_method);
buffer_strcat(wb, "<html>\\n<center>\\n<table border=\\\"0\\\" cellpadding=\\\"5\\\" cellspacing=\\\"5\\\">\\n");
- rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", "", temp_rd);
+ rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\\n", "");
buffer_strcat(wb, "</table>\\n</center>\\n</html>\\n");
rrdr_json_wrapper_end(r, wb, format, options, 1);
}
else {
wb->contenttype = CT_TEXT_HTML;
buffer_strcat(wb, "<html>\n<center>\n<table border=\"0\" cellpadding=\"5\" cellspacing=\"5\">\n");
- rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\n", "", temp_rd);
+ rrdr2csv(r, wb, format, options, "<tr><td>", "</td><td>", "</td></tr>\n", "");
buffer_strcat(wb, "</table>\n</center>\n</html>\n");
}
break;
@@ -428,9 +284,9 @@ int rrdset2anything_api_v1(
wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
- rrdr2json(r, wb, options, 1, query_params->context_param_list);
+ rrdr2json(r, wb, options, 1);
if(options & RRDR_OPTION_JSON_WRAP)
rrdr_json_wrapper_end(r, wb, format, options, 0);
@@ -440,9 +296,9 @@ int rrdset2anything_api_v1(
wb->contenttype = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
- rrdr2json(r, wb, options, 1, query_params->context_param_list);
+ rrdr2json(r, wb, options, 1);
if(options & RRDR_OPTION_JSON_WRAP)
rrdr_json_wrapper_end(r, wb, format, options, 0);
@@ -451,9 +307,9 @@ int rrdset2anything_api_v1(
case DATASOURCE_JSONP:
wb->contenttype = CT_APPLICATION_X_JAVASCRIPT;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
- rrdr2json(r, wb, options, 0, query_params->context_param_list);
+ rrdr2json(r, wb, options, 0);
if(options & RRDR_OPTION_JSON_WRAP)
rrdr_json_wrapper_end(r, wb, format, options, 0);
@@ -464,14 +320,14 @@ int rrdset2anything_api_v1(
wb->contenttype = CT_APPLICATION_JSON;
if(options & RRDR_OPTION_JSON_WRAP)
- rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method, query_params);
+ rrdr_json_wrapper_begin(r, wb, format, options, 0, group_method);
- rrdr2json(r, wb, options, 0, query_params->context_param_list);
+ rrdr2json(r, wb, options, 0);
if(options & RRDR_OPTION_JSON_WRAP) {
if(options & RRDR_OPTION_RETURN_JWAR) {
rrdr_json_wrapper_anomaly_rates(r, wb, format, options, 0);
- rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, 0, query_params->context_param_list);
+ rrdr2json(r, wb, options | RRDR_OPTION_INTERNAL_AR, 0);
}
rrdr_json_wrapper_end(r, wb, format, options, 0);
}
diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h
index 6be53ff8a..048281d7e 100644
--- a/web/api/formatters/rrd2json.h
+++ b/web/api/formatters/rrd2json.h
@@ -5,16 +5,6 @@
#include "web/api/web_api_v1.h"
-typedef struct query_params {
- struct context_param *context_param_list;
- BUFFER *wb;
- char *chart_label_key;
- int max_anomaly_rates;
- int timeout;
- int show_dimensions;
-} QUERY_PARAMS;
-
-
#include "web/api/exporters/allmetrics.h"
#include "web/api/queries/rrdr.h"
@@ -34,19 +24,20 @@ typedef struct query_params {
#define API_RELATIVE_TIME_MAX (3 * 365 * 86400)
// type of JSON generations
-#define DATASOURCE_INVALID (-1)
-#define DATASOURCE_JSON 0
-#define DATASOURCE_DATATABLE_JSON 1
-#define DATASOURCE_DATATABLE_JSONP 2
-#define DATASOURCE_SSV 3
-#define DATASOURCE_CSV 4
-#define DATASOURCE_JSONP 5
-#define DATASOURCE_TSV 6
-#define DATASOURCE_HTML 7
-#define DATASOURCE_JS_ARRAY 8
-#define DATASOURCE_SSV_COMMA 9
-#define DATASOURCE_CSV_JSON_ARRAY 10
-#define DATASOURCE_CSV_MARKDOWN 11
+typedef enum {
+ DATASOURCE_JSON = 0,
+ DATASOURCE_DATATABLE_JSON = 1,
+ DATASOURCE_DATATABLE_JSONP = 2,
+ DATASOURCE_SSV = 3,
+ DATASOURCE_CSV = 4,
+ DATASOURCE_JSONP = 5,
+ DATASOURCE_TSV = 6,
+ DATASOURCE_HTML = 7,
+ DATASOURCE_JS_ARRAY = 8,
+ DATASOURCE_SSV_COMMA = 9,
+ DATASOURCE_CSV_JSON_ARRAY = 10,
+ DATASOURCE_CSV_MARKDOWN = 11,
+} DATASOURCE_FORMAT;
#define DATASOURCE_FORMAT_JSON "json"
#define DATASOURCE_FORMAT_DATATABLE_JSON "datatable"
@@ -61,37 +52,22 @@ typedef struct query_params {
#define DATASOURCE_FORMAT_CSV_JSON_ARRAY "csvjsonarray"
#define DATASOURCE_FORMAT_CSV_MARKDOWN "markdown"
-extern void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb);
-extern void rrdr_buffer_print_format(BUFFER *wb, uint32_t format);
+void rrd_stats_api_v1_chart(RRDSET *st, BUFFER *wb);
+void rrdr_buffer_print_format(BUFFER *wb, uint32_t format);
-extern int rrdset2anything_api_v1(
- ONEWAYALLOC *owa
- , RRDSET *st
- , QUERY_PARAMS *query_params
- , BUFFER *dimensions
- , uint32_t format
- , long points
- , long long after
- , long long before
- , int group_method
- , const char *group_options
- , long group_time
- , uint32_t options
- , time_t *latest_timestamp
- , int tier
-);
+int data_query_execute(ONEWAYALLOC *owa, BUFFER *wb, struct query_target *qt, time_t *latest_timestamp);
-extern int rrdset2value_api_v1(
+int rrdset2value_api_v1(
RRDSET *st
, BUFFER *wb
, NETDATA_DOUBLE *n
, const char *dimensions
- , long points
- , long long after
- , long long before
- , int group_method
+ , size_t points
+ , time_t after
+ , time_t before
+ , RRDR_GROUPING group_method
, const char *group_options
- , long group_time
+ , time_t resampling_time
, uint32_t options
, time_t *db_after
, time_t *db_before
@@ -100,12 +76,9 @@ extern int rrdset2value_api_v1(
, size_t *result_points_generated
, int *value_is_null
, NETDATA_DOUBLE *anomaly_rate
- , int timeout
- , int tier
+ , time_t timeout
+ , size_t tier
+ , QUERY_SOURCE query_source
);
-extern void build_context_param_list(ONEWAYALLOC *owa, struct context_param **param_list, RRDSET *st);
-extern void rebuild_context_param_list(ONEWAYALLOC *owa, struct context_param *context_param_list, time_t after_requested);
-extern void free_context_param_list(ONEWAYALLOC *owa, struct context_param **param_list);
-
#endif /* NETDATA_RRD2JSON_H */
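For orientation, a minimal sketch of how a caller could map the query-string format names (the DATASOURCE_FORMAT_* strings above) onto the new DATASOURCE_FORMAT enum. The helper is hypothetical, not part of this patch, and assumes it is built inside the Netdata tree so rrd2json.h is available:

    #include "rrd2json.h"
    #include <string.h>

    // hypothetical helper: resolve a user-supplied format name to the enum above;
    // only the names visible in this hunk are handled, everything else falls back
    // to the JSON default.
    static DATASOURCE_FORMAT datasource_format_from_name(const char *name) {
        if(!name || !*name)                                  return DATASOURCE_JSON;
        if(!strcmp(name, DATASOURCE_FORMAT_JSON))            return DATASOURCE_JSON;
        if(!strcmp(name, DATASOURCE_FORMAT_DATATABLE_JSON))  return DATASOURCE_DATATABLE_JSON;
        if(!strcmp(name, DATASOURCE_FORMAT_CSV_JSON_ARRAY))  return DATASOURCE_CSV_JSON_ARRAY;
        if(!strcmp(name, DATASOURCE_FORMAT_CSV_MARKDOWN))    return DATASOURCE_CSV_MARKDOWN;
        return DATASOURCE_JSON;
    }

Since the enum no longer carries the old DATASOURCE_INVALID (-1), an unknown name has to resolve to a concrete member, here the JSON default.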
diff --git a/web/api/formatters/rrdset2json.c b/web/api/formatters/rrdset2json.c
index de8d87bae..1e8106335 100644
--- a/web/api/formatters/rrdset2json.c
+++ b/web/api/formatters/rrdset2json.c
@@ -4,7 +4,7 @@
void chart_labels2json(RRDSET *st, BUFFER *wb, size_t indentation)
{
- if(unlikely(!st->state || !st->state->chart_labels))
+ if(unlikely(!st->rrdlabels))
return;
char tabs[11];
@@ -18,17 +18,15 @@ void chart_labels2json(RRDSET *st, BUFFER *wb, size_t indentation)
indentation--;
}
- rrdlabels_to_buffer(st->state->chart_labels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
+ rrdlabels_to_buffer(st->rrdlabels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
buffer_strcat(wb, "\n");
}
// generate JSON for the /api/v1/chart API call
void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile) {
- rrdset_rdlock(st);
-
- time_t first_entry_t = rrdset_first_entry_t_nolock(st);
- time_t last_entry_t = rrdset_last_entry_t_nolock(st);
+ time_t first_entry_t = rrdset_first_entry_t(st);
+ time_t last_entry_t = rrdset_last_entry_t(st);
buffer_sprintf(
wb,
@@ -45,18 +43,18 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
"\t\t\t\"units\": \"%s\",\n"
"\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n"
"\t\t\t\"chart_type\": \"%s\",\n",
- st->id,
- st->name,
- st->type,
- st->family,
- st->context,
- st->title,
- st->name,
+ rrdset_id(st),
+ rrdset_name(st),
+ rrdset_parts_type(st),
+ rrdset_family(st),
+ rrdset_context(st),
+ rrdset_title(st),
+ rrdset_name(st),
st->priority,
- st->plugin_name ? st->plugin_name : "",
- st->module_name ? st->module_name : "",
- st->units,
- st->name,
+ rrdset_plugin_name(st),
+ rrdset_module_name(st),
+ rrdset_units(st),
+ rrdset_name(st),
rrdset_type_name(st->chart_type));
if (likely(!skip_volatile))
@@ -90,7 +88,7 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
size_t dimensions = 0;
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
- if(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN) || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) continue;
+ if(rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN) || rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE)) continue;
memory += sizeof(RRDDIM) + rd->memsize;
@@ -98,13 +96,14 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
buffer_strcat(wb, ",\n\t\t\t\t\"");
else
buffer_strcat(wb, "\t\t\t\t\"");
- buffer_strcat_jsonescape(wb, rd->id);
+ buffer_strcat_jsonescape(wb, rrddim_id(rd));
buffer_strcat(wb, "\": { \"name\": \"");
- buffer_strcat_jsonescape(wb, rd->name);
+ buffer_strcat_jsonescape(wb, rrddim_name(rd));
buffer_strcat(wb, "\" }");
dimensions++;
}
+ rrddim_foreach_done(rd);
if(dimensions_count) *dimensions_count += dimensions;
if(memory_used) *memory_used += memory;
@@ -121,7 +120,8 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
buffer_strcat(wb, ",\n\t\t\t\"alarms\": {\n");
size_t alarms = 0;
RRDCALC *rc;
- for (rc = st->alarms; rc; rc = rc->rrdset_next) {
+ netdata_rwlock_rdlock(&st->alerts.rwlock);
+ DOUBLE_LINKED_LIST_FOREACH_FORWARD(st->alerts.base, rc, prev, next) {
buffer_sprintf(
wb,
"%s"
@@ -131,23 +131,25 @@ void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memor
"\t\t\t\t\t\"units\": \"%s\",\n"
"\t\t\t\t\t\"update_every\": %d\n"
"\t\t\t\t}",
- (alarms) ? ",\n" : "", rc->name, rc->id, rrdcalc_status2string(rc->status), rc->units,
+ (alarms) ? ",\n" : "", rrdcalc_name(rc), rc->id, rrdcalc_status2string(rc->status), rrdcalc_units(rc),
rc->update_every);
alarms++;
}
+ netdata_rwlock_unlock(&st->alerts.rwlock);
buffer_sprintf(wb,
"\n\t\t\t}"
);
}
buffer_strcat(wb, ",\n\t\t\t\"chart_labels\": {\n");
chart_labels2json(st, wb, 2);
- buffer_strcat(wb, "\t\t\t}\n");
+ buffer_strcat(wb, "\t\t\t}");
+ buffer_strcat(wb, ",\n\t\t\t\"functions\": {\n");
+ chart_functions2json(st, wb, 4, "\"", "\"");
+ buffer_strcat(wb, "\t\t\t}");
buffer_sprintf(wb,
"\n\t\t}"
);
-
- rrdset_unlock(st);
}
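The hunks above replace direct pointer walks with the newer iteration and locking primitives. A minimal sketch of the dimension side of that contract, mirroring what rrdset2json() now does; it assumes the Netdata tree headers and is illustrative only, not code from this patch:

    #include "rrdset2json.h"

    // illustrative only: count the dimensions that rrdset2json() would emit,
    // skipping hidden and obsolete ones, using the new foreach pair.
    static size_t count_visible_dimensions(RRDSET *st) {
        size_t count = 0;
        RRDDIM *rd;
        rrddim_foreach_read(rd, st) {
            if(rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN) ||
               rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE))
                continue;
            count++;
        }
        rrddim_foreach_done(rd);   // the new iteration API requires closing the walk
        return count;
    }

The alerts loop gets the same treatment: readers now take st->alerts.rwlock and iterate with DOUBLE_LINKED_LIST_FOREACH_FORWARD instead of following rc->rrdset_next.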
diff --git a/web/api/formatters/rrdset2json.h b/web/api/formatters/rrdset2json.h
index 697c84634..b2908e225 100644
--- a/web/api/formatters/rrdset2json.h
+++ b/web/api/formatters/rrdset2json.h
@@ -5,6 +5,6 @@
#include "rrd2json.h"
-extern void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile);
+void rrdset2json(RRDSET *st, BUFFER *wb, size_t *dimensions_count, size_t *memory_used, int skip_volatile);
#endif //NETDATA_API_FORMATTER_RRDSET2JSON_H
diff --git a/web/api/formatters/ssv/ssv.c b/web/api/formatters/ssv/ssv.c
index 850182da1..d561980d9 100644
--- a/web/api/formatters/ssv/ssv.c
+++ b/web/api/formatters/ssv/ssv.c
@@ -2,7 +2,7 @@
#include "ssv.h"
-void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix, RRDDIM *temp_rd) {
+void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix) {
//info("RRD2SSV(): %s: BEGIN", r->st->id);
long i;
@@ -17,7 +17,7 @@ void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, con
// for each line in the array
for(i = start; i != end ;i += step) {
int all_values_are_null = 0;
- NETDATA_DOUBLE v = rrdr2value(r, i, options, &all_values_are_null, NULL, temp_rd);
+ NETDATA_DOUBLE v = rrdr2value(r, i, options, &all_values_are_null, NULL);
if(likely(i != start)) {
if(r->min > v) r->min = v;
diff --git a/web/api/formatters/ssv/ssv.h b/web/api/formatters/ssv/ssv.h
index 66716b9c9..f7d4a9548 100644
--- a/web/api/formatters/ssv/ssv.h
+++ b/web/api/formatters/ssv/ssv.h
@@ -5,6 +5,6 @@
#include "../rrd2json.h"
-extern void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix, RRDDIM *temp_rd);
+void rrdr2ssv(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, const char *prefix, const char *separator, const char *suffix);
#endif //NETDATA_API_FORMATTER_SSV_H
diff --git a/web/api/formatters/value/value.c b/web/api/formatters/value/value.c
index 30e00c068..46a71303e 100644
--- a/web/api/formatters/value/value.c
+++ b/web/api/formatters/value/value.c
@@ -3,9 +3,10 @@
#include "value.h"
-inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate, RRDDIM *temp_rd) {
+inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate) {
+ QUERY_TARGET *qt = r->internal.qt;
long c;
- RRDDIM *d;
+ const long used = qt->query.used;
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
@@ -20,7 +21,7 @@ inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all
int set_min_max = 0;
if(unlikely(options & RRDR_OPTION_PERCENTAGE)) {
total = 0;
- for (c = 0, d = temp_rd ? temp_rd : r->st->dimensions; d && c < r->d; c++, d = d->next) {
+ for (c = 0; c < used; c++) {
NETDATA_DOUBLE n = cn[c];
if(likely((options & RRDR_OPTION_ABSOLUTE) && n < 0))
@@ -34,7 +35,7 @@ inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all
}
// for each dimension
- for (c = 0, d = temp_rd ? temp_rd : r->st->dimensions; d && c < r->d; c++, d = d->next) {
+ for (c = 0; c < used; c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
if(unlikely((options & RRDR_OPTION_NONZERO) && !(r->od[c] & RRDR_DIMENSION_NONZERO))) continue;
@@ -80,7 +81,7 @@ inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all
if(anomaly_rate) {
if(!r->d) *anomaly_rate = 0;
- else *anomaly_rate = total_anomaly_rate / r->d;
+ else *anomaly_rate = total_anomaly_rate / (NETDATA_DOUBLE)r->d;
}
if(unlikely(all_null)) {
@@ -100,3 +101,62 @@ inline NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all
return v;
}
+
+QUERY_VALUE rrdmetric2value(RRDHOST *host,
+ struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier, time_t timeout, QUERY_SOURCE query_source
+) {
+ QUERY_TARGET_REQUEST qtr = {
+ .host = host,
+ .rca = rca,
+ .ria = ria,
+ .rma = rma,
+ .after = after,
+ .before = before,
+ .points = 1,
+ .options = options,
+ .group_method = group_method,
+ .group_options = group_options,
+ .tier = tier,
+ .timeout = timeout,
+ .query_source = query_source,
+ };
+
+ ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
+ RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+
+ QUERY_VALUE qv;
+
+ if(!r || rrdr_rows(r) == 0) {
+ qv = (QUERY_VALUE) {
+ .value = NAN,
+ .anomaly_rate = NAN,
+ };
+ }
+ else {
+ qv = (QUERY_VALUE) {
+ .after = r->after,
+ .before = r->before,
+ .points_read = r->internal.db_points_read,
+ .result_points = r->internal.result_points_generated,
+ };
+
+ for(size_t t = 0; t < storage_tiers ;t++)
+ qv.storage_points_per_tier[t] = r->internal.tier_points_read[t];
+
+ long i = (!(options & RRDR_OPTION_REVERSED))?(long)rrdr_rows(r) - 1:0;
+ int all_values_are_null = 0;
+ qv.value = rrdr2value(r, i, options, &all_values_are_null, &qv.anomaly_rate);
+ if(all_values_are_null) {
+ qv.value = NAN;
+ qv.anomaly_rate = NAN;
+ }
+ }
+
+ rrdr_free(owa, r);
+ onewayalloc_destroy(owa);
+
+ return qv;
+}
diff --git a/web/api/formatters/value/value.h b/web/api/formatters/value/value.h
index fc1c7bf08..76b1869f3 100644
--- a/web/api/formatters/value/value.h
+++ b/web/api/formatters/value/value.h
@@ -5,6 +5,27 @@
#include "../rrd2json.h"
-extern NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate, RRDDIM *temp_rd);
+typedef struct storage_value {
+ NETDATA_DOUBLE value;
+ NETDATA_DOUBLE anomaly_rate;
+ time_t after;
+ time_t before;
+ size_t points_read;
+ size_t storage_points_per_tier[RRD_STORAGE_TIERS];
+ size_t result_points;
+} QUERY_VALUE;
+
+struct rrdmetric_acquired;
+struct rrdinstance_acquired;
+struct rrdcontext_acquired;
+
+QUERY_VALUE rrdmetric2value(RRDHOST *host,
+ struct rrdcontext_acquired *rca, struct rrdinstance_acquired *ria, struct rrdmetric_acquired *rma,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier, time_t timeout, QUERY_SOURCE query_source
+);
+
+NETDATA_DOUBLE rrdr2value(RRDR *r, long i, RRDR_OPTIONS options, int *all_values_are_null, NETDATA_DOUBLE *anomaly_rate);
#endif //NETDATA_API_FORMATTER_VALUE_H
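A minimal sketch of how a caller could use the new single-value helper declared above. The acquired context, instance and metric handles are assumed to come from the rrdcontext API elsewhere, QUERY_SOURCE_API_DATA is an assumed enum member, and the whole function is illustrative rather than code from this patch:

    #include "value.h"
    #include <math.h>
    #include <stdio.h>

    // illustrative only: average of the last 5 minutes of one metric.
    // rca/ria/rma must already be acquired and are released by the caller.
    static int print_last_5m_average(RRDHOST *host,
                                     struct rrdcontext_acquired *rca,
                                     struct rrdinstance_acquired *ria,
                                     struct rrdmetric_acquired *rma) {
        time_t now = now_realtime_sec();

        QUERY_VALUE qv = rrdmetric2value(host, rca, ria, rma,
                                         now - 300, now,
                                         0,                        // no special RRDR_OPTIONS
                                         RRDR_GROUPING_AVERAGE, NULL,
                                         0,                        // tier (used only with the selected-tier option)
                                         0,                        // no timeout
                                         QUERY_SOURCE_API_DATA);   // assumed QUERY_SOURCE member

        if(isnan(qv.value))
            return -1;                 // value.c fills NAN when no data is found

        printf("value %f, anomaly rate %f%%\n", (double)qv.value, (double)qv.anomaly_rate);
        return 0;
    }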
diff --git a/web/api/health/health_cmdapi.h b/web/api/health/health_cmdapi.h
index d8ec6aaa0..d5309c73f 100644
--- a/web/api/health/health_cmdapi.h
+++ b/web/api/health/health_cmdapi.h
@@ -24,7 +24,7 @@
#define HEALTH_CMDAPI_MSG_STYPEWARNING "WARNING: Added alarm selector to silence/disable alarms without a SILENCE or DISABLE command.\n"
#define HEALTH_CMDAPI_MSG_NOSELECTORWARNING "WARNING: SILENCE or DISABLE command is ineffective without defining any alarm selectors.\n"
-extern int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_mgmt_health(RRDHOST *host, struct web_client *w, char *url);
#include "web/api/web_api_v1.h"
diff --git a/web/api/netdata-swagger.json b/web/api/netdata-swagger.json
index 029783b55..cb2b4809c 100644
--- a/web/api/netdata-swagger.json
+++ b/web/api/netdata-swagger.json
@@ -573,7 +573,6 @@
"unaligned",
"match-ids",
"match-names",
- "showcustomvars",
"allow_past",
"anomaly-bit"
]
@@ -1629,6 +1628,68 @@
}
}
},
+ "/function": {
+ "get": {
+ "summary": "Execute a collector function.",
+ "parameters": [
+ {
+ "name": "function",
+ "in": "query",
+ "description": "The name of the function, as returned by the collector.",
+ "required": true,
+ "allowEmptyValue": false,
+ "schema": {
+ "type": "string"
+ }
+ },
+ {
+ "name": "timeout",
+ "in": "query",
+ "description": "The timeout in seconds to wait for the function to complete.",
+ "required": false,
+ "schema": {
+ "type": "number",
+ "format": "integer",
+ "default": 10
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "The collector function has been executed successfully. Each collector may return a different type of content."
+ },
+ "400": {
+ "description": "The request was rejected by the collector."
+ },
+ "404": {
+ "description": "The requested function is not found."
+ },
+ "500": {
+ "description": "Other internal error, getting this error means there is a bug in Netdata."
+ },
+ "503": {
+ "description": "The collector to execute the function is not currently available."
+ },
+ "504": {
+ "description": "Timeout while waiting for the collector to execute the function."
+ },
+ "591": {
+ "description": "The collector sent a response, but it was invalid or corrupted."
+ }
+ }
+ }
+ },
+ "/functions": {
+ "get": {
+ "summary": "Get a list of all registered collector functions.",
+ "description": "Collector functions are programs that can be executed on demand.",
+ "responses": {
+ "200": {
+ "description": "A JSON object containing one object per supported function."
+ }
+ }
+ }
+ },
"/weights": {
"get": {
"summary": "Analyze all the metrics using an algorithm and score them accordingly",
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
index 2e04e9f20..fced6544f 100644
--- a/web/api/netdata-swagger.yaml
+++ b/web/api/netdata-swagger.yaml
@@ -471,7 +471,6 @@ paths:
- unaligned
- match-ids
- match-names
- - showcustomvars
- allow_past
- anomaly-bit
default:
@@ -1351,6 +1350,47 @@ paths:
that correlated the metrics did not produce any result.
"504":
description: Timeout - the query took too long and has been cancelled.
+ /function:
+ get:
+ summary: "Execute a collector function."
+ parameters:
+ - name: function
+ in: query
+ description: The name of the function, as returned by the collector.
+ required: true
+ allowEmptyValue: false
+ schema:
+ type: string
+ - name: timeout
+ in: query
+ description: The timeout in seconds to wait for the function to complete.
+ required: false
+ schema:
+ type: number
+ format: integer
+ default: 10
+ responses:
+ "200":
+ description: The collector function has been executed successfully. Each collector may return a different type of content.
+ "400":
+ description: The request was rejected by the collector.
+ "404":
+ description: The requested function is not found.
+ "500":
+ description: Other internal error; getting this error means there is a bug in Netdata.
+ "503":
+ description: The collector to execute the function is not currently available.
+ "504":
+ description: Timeout while waiting for the collector to execute the function.
+ "591":
+ description: The collector sent a response, but it was invalid or corrupted.
+ /functions:
+ get:
+ summary: Get a list of all registered collector functions.
+ description: Collector functions are programs that can be executed on demand.
+ responses:
+ "200":
+ description: A JSON object containing one object per supported function.
/weights:
get:
summary: "Analyze all the metrics using an algorithm and score them accordingly"
diff --git a/web/api/queries/average/average.h b/web/api/queries/average/average.h
index 55c51722c..b31966886 100644
--- a/web/api/queries/average/average.h
+++ b/web/api/queries/average/average.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_average(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_average(RRDR *r);
-extern void grouping_free_average(RRDR *r);
-extern void grouping_add_average(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_average(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_average(RRDR *r);
+void grouping_free_average(RRDR *r);
+void grouping_add_average(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_average(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_AVERAGE_H
diff --git a/web/api/queries/countif/countif.h b/web/api/queries/countif/countif.h
index 0c7d2d7d1..dfe805658 100644
--- a/web/api/queries/countif/countif.h
+++ b/web/api/queries/countif/countif.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_countif(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_countif(RRDR *r);
-extern void grouping_free_countif(RRDR *r);
-extern void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_countif(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_countif(RRDR *r);
+void grouping_free_countif(RRDR *r);
+void grouping_add_countif(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_countif(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_COUNTIF_H
diff --git a/web/api/queries/des/des.h b/web/api/queries/des/des.h
index 8906d14eb..05fa01b34 100644
--- a/web/api/queries/des/des.h
+++ b/web/api/queries/des/des.h
@@ -6,12 +6,12 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_init_des(void);
+void grouping_init_des(void);
-extern void grouping_create_des(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_des(RRDR *r);
-extern void grouping_free_des(RRDR *r);
-extern void grouping_add_des(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_des(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_des(RRDR *r);
+void grouping_free_des(RRDR *r);
+void grouping_add_des(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_des(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_DES_H
diff --git a/web/api/queries/incremental_sum/incremental_sum.h b/web/api/queries/incremental_sum/incremental_sum.h
index 6d908cef6..c24507fcf 100644
--- a/web/api/queries/incremental_sum/incremental_sum.h
+++ b/web/api/queries/incremental_sum/incremental_sum.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_incremental_sum(RRDR *r);
-extern void grouping_free_incremental_sum(RRDR *r);
-extern void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_incremental_sum(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_incremental_sum(RRDR *r);
+void grouping_free_incremental_sum(RRDR *r);
+void grouping_add_incremental_sum(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_incremental_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_INCREMENTAL_SUM_H
diff --git a/web/api/queries/max/max.h b/web/api/queries/max/max.h
index 28913686b..e2427d26d 100644
--- a/web/api/queries/max/max.h
+++ b/web/api/queries/max/max.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_max(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_max(RRDR *r);
-extern void grouping_free_max(RRDR *r);
-extern void grouping_add_max(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_max(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_max(RRDR *r);
+void grouping_free_max(RRDR *r);
+void grouping_add_max(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_max(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_MAX_H
diff --git a/web/api/queries/median/median.h b/web/api/queries/median/median.h
index dd1b3de61..9fc159db4 100644
--- a/web/api/queries/median/median.h
+++ b/web/api/queries/median/median.h
@@ -6,18 +6,18 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_median(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median1(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median2(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median3(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median5(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median10(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median15(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median20(RRDR *r, const char *options);
-extern void grouping_create_trimmed_median25(RRDR *r, const char *options);
-extern void grouping_reset_median(RRDR *r);
-extern void grouping_free_median(RRDR *r);
-extern void grouping_add_median(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_median(RRDR *r, const char *options);
+void grouping_create_trimmed_median1(RRDR *r, const char *options);
+void grouping_create_trimmed_median2(RRDR *r, const char *options);
+void grouping_create_trimmed_median3(RRDR *r, const char *options);
+void grouping_create_trimmed_median5(RRDR *r, const char *options);
+void grouping_create_trimmed_median10(RRDR *r, const char *options);
+void grouping_create_trimmed_median15(RRDR *r, const char *options);
+void grouping_create_trimmed_median20(RRDR *r, const char *options);
+void grouping_create_trimmed_median25(RRDR *r, const char *options);
+void grouping_reset_median(RRDR *r);
+void grouping_free_median(RRDR *r);
+void grouping_add_median(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_median(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_MEDIAN_H
diff --git a/web/api/queries/min/min.h b/web/api/queries/min/min.h
index b8627f667..dcdfe252f 100644
--- a/web/api/queries/min/min.h
+++ b/web/api/queries/min/min.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_min(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_min(RRDR *r);
-extern void grouping_free_min(RRDR *r);
-extern void grouping_add_min(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_min(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_min(RRDR *r);
+void grouping_free_min(RRDR *r);
+void grouping_add_min(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_min(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_MIN_H
diff --git a/web/api/queries/percentile/percentile.h b/web/api/queries/percentile/percentile.h
index 709717ebd..65e335c11 100644
--- a/web/api/queries/percentile/percentile.h
+++ b/web/api/queries/percentile/percentile.h
@@ -6,18 +6,18 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_percentile25(RRDR *r, const char *options);
-extern void grouping_create_percentile50(RRDR *r, const char *options);
-extern void grouping_create_percentile75(RRDR *r, const char *options);
-extern void grouping_create_percentile80(RRDR *r, const char *options);
-extern void grouping_create_percentile90(RRDR *r, const char *options);
-extern void grouping_create_percentile95(RRDR *r, const char *options);
-extern void grouping_create_percentile97(RRDR *r, const char *options);
-extern void grouping_create_percentile98(RRDR *r, const char *options);
-extern void grouping_create_percentile99(RRDR *r, const char *options );
-extern void grouping_reset_percentile(RRDR *r);
-extern void grouping_free_percentile(RRDR *r);
-extern void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_percentile25(RRDR *r, const char *options);
+void grouping_create_percentile50(RRDR *r, const char *options);
+void grouping_create_percentile75(RRDR *r, const char *options);
+void grouping_create_percentile80(RRDR *r, const char *options);
+void grouping_create_percentile90(RRDR *r, const char *options);
+void grouping_create_percentile95(RRDR *r, const char *options);
+void grouping_create_percentile97(RRDR *r, const char *options);
+void grouping_create_percentile98(RRDR *r, const char *options);
+void grouping_create_percentile99(RRDR *r, const char *options );
+void grouping_reset_percentile(RRDR *r);
+void grouping_free_percentile(RRDR *r);
+void grouping_add_percentile(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_percentile(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_PERCENTILE_H
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
index d776f6d11..ccd195135 100644
--- a/web/api/queries/query.c
+++ b/web/api/queries/query.c
@@ -658,71 +658,6 @@ static void rrdr_set_grouping_function(RRDR *r, RRDR_GROUPING group_method) {
}
// ----------------------------------------------------------------------------
-
-static void rrdr_disable_not_selected_dimensions(RRDR *r, RRDR_OPTIONS options, const char *dims,
- struct context_param *context_param_list)
-{
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
- int should_lock = (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE));
-
- if (should_lock)
- rrdset_check_rdlock(r->st);
-
- if(unlikely(!dims || !*dims || (dims[0] == '*' && dims[1] == '\0'))) return;
-
- int match_ids = 0, match_names = 0;
-
- if(unlikely(options & RRDR_OPTION_MATCH_IDS))
- match_ids = 1;
- if(unlikely(options & RRDR_OPTION_MATCH_NAMES))
- match_names = 1;
-
- if(likely(!match_ids && !match_names))
- match_ids = match_names = 1;
-
- SIMPLE_PATTERN *pattern = simple_pattern_create(dims, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
-
- RRDDIM *d;
- long c, dims_selected = 0, dims_not_hidden_not_zero = 0;
- for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next) {
- if( (match_ids && simple_pattern_matches(pattern, d->id))
- || (match_names && simple_pattern_matches(pattern, d->name))
- ) {
- r->od[c] |= RRDR_DIMENSION_SELECTED;
- if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) r->od[c] &= ~RRDR_DIMENSION_HIDDEN;
- dims_selected++;
-
- // since the user needs this dimension
- // make it appear as NONZERO, to return it
- // even if the dimension has only zeros
- // unless option non_zero is set
- if(unlikely(!(options & RRDR_OPTION_NONZERO)))
- r->od[c] |= RRDR_DIMENSION_NONZERO;
-
- // count the visible dimensions
- if(likely(r->od[c] & RRDR_DIMENSION_NONZERO))
- dims_not_hidden_not_zero++;
- }
- else {
- r->od[c] |= RRDR_DIMENSION_HIDDEN;
- if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
- }
- }
- simple_pattern_free(pattern);
-
- // check if all dimensions are hidden
- if(unlikely(!dims_not_hidden_not_zero && dims_selected)) {
- // there are a few selected dimensions,
- // but they are all zero
- // enable the selected ones
- // to avoid returning an empty chart
- for(c = 0, d = temp_rd?temp_rd:r->st->dimensions; d ;c++, d = d->next)
- if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED))
- r->od[c] |= RRDR_DIMENSION_NONZERO;
- }
-}
-
-// ----------------------------------------------------------------------------
// helpers to find our way in RRDR
static inline RRDR_VALUE_FLAGS *UNUSED_FUNCTION(rrdr_line_options)(RRDR *r, long rrdr_line) {
@@ -736,13 +671,13 @@ static inline NETDATA_DOUBLE *UNUSED_FUNCTION(rrdr_line_values)(RRDR *r, long rr
static inline long rrdr_line_init(RRDR *r, time_t t, long rrdr_line) {
rrdr_line++;
- internal_error(rrdr_line >= r->n,
- "QUERY: requested to step above RRDR size for chart '%s'",
- r->st->name);
+ internal_error(rrdr_line >= (long)r->n,
+ "QUERY: requested to step above RRDR size for query '%s'",
+ r->internal.qt->id);
internal_error(r->t[rrdr_line] != 0 && r->t[rrdr_line] != t,
- "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of chart '%s'",
- (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->st->name);
+ "QUERY: overwriting the timestamp of RRDR line %zu from %zu to %zu, of query '%s'",
+ (size_t)rrdr_line, (size_t)r->t[rrdr_line], (size_t)t, r->internal.qt->id);
// save the time
r->t[rrdr_line] = t;
@@ -758,68 +693,137 @@ static inline void rrdr_done(RRDR *r, long rrdr_line) {
// ----------------------------------------------------------------------------
// tier management
-static int rrddim_find_best_tier_for_timeframe(RRDDIM *rd, time_t after_wanted, time_t before_wanted, long points_wanted) {
+static bool query_metric_is_valid_tier(QUERY_METRIC *qm, size_t tier) {
+ if(!qm->tiers[tier].db_metric_handle || !qm->tiers[tier].db_first_time_t || !qm->tiers[tier].db_last_time_t || !qm->tiers[tier].db_update_every)
+ return false;
+
+ return true;
+}
+
+static size_t query_metric_first_working_tier(QUERY_METRIC *qm) {
+ for(size_t tier = 0; tier < storage_tiers ; tier++) {
+
+ // find the db time-range for this tier for all metrics
+ STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
+ time_t first_t = qm->tiers[tier].db_first_time_t;
+ time_t last_t = qm->tiers[tier].db_last_time_t;
+ time_t update_every = qm->tiers[tier].db_update_every;
+
+ if(!db_metric_handle || !first_t || !last_t || !update_every)
+ continue;
+
+ return tier;
+ }
+
+ return 0;
+}
+
+static long query_plan_points_coverage_weight(time_t db_first_t, time_t db_last_t, time_t db_update_every, time_t after_wanted, time_t before_wanted, size_t points_wanted, size_t tier __maybe_unused) {
+ if(db_first_t == 0 || db_last_t == 0 || db_update_every == 0)
+ return -LONG_MAX;
+
+ time_t common_first_t = MAX(db_first_t, after_wanted);
+ time_t common_last_t = MIN(db_last_t, before_wanted);
+
+ long time_coverage = (common_last_t - common_first_t) * 1000000 / (before_wanted - after_wanted);
+ size_t points_wanted_in_coverage = points_wanted * time_coverage / 1000000;
+
+ long points_available = (common_last_t - common_first_t) / db_update_every;
+ long points_delta = (long)(points_available - points_wanted_in_coverage);
+ long points_coverage = (points_delta < 0) ? (long)(points_available * time_coverage / points_wanted_in_coverage) : time_coverage;
+
+ // a way to benefit higher tiers
+ // points_coverage += (long)tier * 10000;
+
+ if(points_available <= 0)
+ return -LONG_MAX;
+
+ return points_coverage;
+}
+
+static size_t query_metric_best_tier_for_timeframe(QUERY_METRIC *qm, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
if(unlikely(storage_tiers < 2))
return 0;
- if(unlikely(after_wanted == before_wanted || points_wanted <= 0 || !rd || !rd->rrdset)) {
+ if(unlikely(after_wanted == before_wanted || points_wanted <= 0))
+ return query_metric_first_working_tier(qm);
- if(!rd)
- internal_error(true, "QUERY: NULL dimension - invalid params to tier calculation");
- else
- internal_error(true, "QUERY: chart '%s' dimension '%s' invalid params to tier calculation",
- (rd->rrdset)?rd->rrdset->name:"unknown", rd->name);
+ long weight[storage_tiers];
- return 0;
+ for(size_t tier = 0; tier < storage_tiers ; tier++) {
+
+ // find the db time-range for this tier for all metrics
+ STORAGE_METRIC_HANDLE *db_metric_handle = qm->tiers[tier].db_metric_handle;
+ time_t first_t = qm->tiers[tier].db_first_time_t;
+ time_t last_t = qm->tiers[tier].db_last_time_t;
+ time_t update_every = qm->tiers[tier].db_update_every;
+
+ if(!db_metric_handle || !first_t || !last_t || !update_every) {
+ weight[tier] = -LONG_MAX;
+ continue;
+ }
+
+ weight[tier] = query_plan_points_coverage_weight(first_t, last_t, update_every, after_wanted, before_wanted, points_wanted, tier);
}
- //BUFFER *wb = buffer_create(1000);
- //buffer_sprintf(wb, "Best tier for chart '%s', dim '%s', from %ld to %ld (dur %ld, every %d), points %ld",
- // rd->rrdset->name, rd->name, after_wanted, before_wanted, before_wanted - after_wanted, rd->update_every, points_wanted);
+ size_t best_tier = 0;
+ for(size_t tier = 1; tier < storage_tiers ; tier++) {
+ if(weight[tier] >= weight[best_tier])
+ best_tier = tier;
+ }
+
+ return best_tier;
+}
+
+static size_t rrddim_find_best_tier_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
+ if(unlikely(storage_tiers < 2))
+ return 0;
+
+ if(unlikely(after_wanted == before_wanted || points_wanted <= 0)) {
+ internal_error(true, "QUERY: '%s' has invalid params to tier calculation", qt->id);
+ return 0;
+ }
long weight[storage_tiers];
- for(int tier = 0; tier < storage_tiers ; tier++) {
- if(unlikely(!rd->tiers[tier])) {
- internal_error(true, "QUERY: tier %d of chart '%s' dimension '%s' not initialized",
- tier, rd->rrdset->name, rd->name);
- // buffer_free(wb);
- return 0;
- }
+ for(size_t tier = 0; tier < storage_tiers ; tier++) {
- time_t first_t = rd->tiers[tier]->query_ops.oldest_time(rd->tiers[tier]->db_metric_handle);
- time_t last_t = rd->tiers[tier]->query_ops.latest_time(rd->tiers[tier]->db_metric_handle);
+ time_t common_first_t = 0;
+ time_t common_last_t = 0;
+ time_t common_update_every = 0;
- time_t common_after = MAX(first_t, after_wanted);
- time_t common_before = MIN(last_t, before_wanted);
+ // find the db time-range for this tier for all metrics
+ for(size_t i = 0, used = qt->query.used; i < used ; i++) {
+ QUERY_METRIC *qm = &qt->query.array[i];
- long time_coverage = (common_before - common_after) * 1000 / (before_wanted - after_wanted);
- if(time_coverage < 0) time_coverage = 0;
+ time_t first_t = qm->tiers[tier].db_first_time_t;
+ time_t last_t = qm->tiers[tier].db_last_time_t;
+ time_t update_every = qm->tiers[tier].db_update_every;
- int update_every = (int)rd->tiers[tier]->tier_grouping * (int)rd->update_every;
- if(unlikely(update_every == 0)) {
- internal_error(true, "QUERY: update_every of tier %d for chart '%s' dimension '%s' is zero. tg = %d, ue = %d",
- tier, rd->rrdset->name, rd->name, rd->tiers[tier]->tier_grouping, rd->update_every);
- // buffer_free(wb);
- return 0;
- }
+ if(!first_t || !last_t || !update_every)
+ continue;
- long points_available = (before_wanted - after_wanted) / update_every;
- long points_delta = points_available - points_wanted;
- long points_coverage = (points_delta < 0) ? points_available * 1000 / points_wanted: 1000;
+ if(!common_first_t)
+ common_first_t = first_t;
+ else
+ common_first_t = MIN(first_t, common_first_t);
- if(points_available <= 0)
- weight[tier] = -LONG_MAX;
- else
- weight[tier] = points_coverage;
+ if(!common_last_t)
+ common_last_t = last_t;
+ else
+ common_last_t = MAX(last_t, common_last_t);
- // buffer_sprintf(wb, ": tier %d, first %ld, last %ld (dur %ld, tg %d, every %d), points %ld, tcoverage %ld, pcoverage %ld, weight %ld",
- // tier, first_t, last_t, last_t - first_t, rd->tiers[tier]->tier_grouping, update_every,
- // points_available, time_coverage, points_coverage, weight[tier]);
+ if(!common_update_every)
+ common_update_every = update_every;
+ else
+ common_update_every = MIN(update_every, common_update_every);
+ }
+
+ weight[tier] = query_plan_points_coverage_weight(common_first_t, common_last_t, common_update_every, after_wanted, before_wanted, points_wanted, tier);
}
- int best_tier = 0;
- for(int tier = 1; tier < storage_tiers ; tier++) {
+ size_t best_tier = 0;
+ for(size_t tier = 1; tier < storage_tiers ; tier++) {
if(weight[tier] >= weight[best_tier])
best_tier = tier;
}
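To make the new weighting concrete, here is a self-contained rerun of the query_plan_points_coverage_weight() arithmetic from the hunk above, using made-up numbers: a one-hour query for 360 points against a per-second tier that only covers the last 10 minutes and a 10-second tier that covers the whole hour.

    #include <stdio.h>
    #include <time.h>
    #include <limits.h>

    #define MAX(a, b) (((a) > (b)) ? (a) : (b))
    #define MIN(a, b) (((a) < (b)) ? (a) : (b))

    // same arithmetic as query_plan_points_coverage_weight() above
    static long coverage_weight(time_t db_first_t, time_t db_last_t, time_t db_update_every,
                                time_t after_wanted, time_t before_wanted, size_t points_wanted) {
        time_t common_first_t = MAX(db_first_t, after_wanted);
        time_t common_last_t  = MIN(db_last_t, before_wanted);

        long time_coverage = (common_last_t - common_first_t) * 1000000 / (before_wanted - after_wanted);
        size_t points_wanted_in_coverage = points_wanted * time_coverage / 1000000;

        long points_available = (common_last_t - common_first_t) / db_update_every;
        long points_delta = (long)(points_available - points_wanted_in_coverage);

        if(points_available <= 0)
            return -LONG_MAX;

        return (points_delta < 0) ? (long)(points_available * time_coverage / points_wanted_in_coverage)
                                  : time_coverage;
    }

    int main(void) {
        time_t after = 0, before = 3600;    // hypothetical one-hour window
        size_t points = 360;                // points wanted

        long w0 = coverage_weight(3000, 3600,  1, after, before, points);  // tier 0: 1s data, last 600s only
        long w1 = coverage_weight(   0, 3600, 10, after, before, points);  // tier 1: 10s data, whole hour

        // prints: tier0 weight 166666, tier1 weight 1000000 -> best tier 1
        printf("tier0 weight %ld, tier1 weight %ld -> best tier %d\n", w0, w1, (w1 >= w0) ? 1 : 0);
        return 0;
    }

With the ">=" comparison in the selection loop above, ties go to the higher tier, so the coarser but longer-retention tier wins whenever its coverage is at least as good.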
@@ -827,47 +831,30 @@ static int rrddim_find_best_tier_for_timeframe(RRDDIM *rd, time_t after_wanted,
if(weight[best_tier] == -LONG_MAX)
best_tier = 0;
- //buffer_sprintf(wb, ": final best tier %d", best_tier);
- //internal_error(true, "%s", buffer_tostring(wb));
- //buffer_free(wb);
-
return best_tier;
}
-static int rrdset_find_natural_update_every_for_timeframe(RRDSET *st, time_t after_wanted, time_t before_wanted, long points_wanted, RRDR_OPTIONS options, int tier) {
- int ret = st->update_every;
-
- if(unlikely(!st->dimensions))
- return ret;
-
- rrdset_rdlock(st);
- int best_tier;
-
- if(options & RRDR_OPTION_SELECTED_TIER && tier >= 0 && tier < storage_tiers)
+static time_t rrdset_find_natural_update_every_for_timeframe(QUERY_TARGET *qt, time_t after_wanted, time_t before_wanted, size_t points_wanted, RRDR_OPTIONS options, size_t tier) {
+ size_t best_tier;
+ if((options & RRDR_OPTION_SELECTED_TIER) && tier < storage_tiers)
best_tier = tier;
else
- best_tier = rrddim_find_best_tier_for_timeframe(st->dimensions, after_wanted, before_wanted, points_wanted);
+ best_tier = rrddim_find_best_tier_for_timeframe(qt, after_wanted, before_wanted, points_wanted);
- if(!st->dimensions->tiers[best_tier]) {
- internal_error(
- true,
- "QUERY: tier %d on chart '%s', is not initialized", best_tier, st->name);
- }
- else {
- ret = (int)st->dimensions->tiers[best_tier]->tier_grouping * (int)st->update_every;
- if(unlikely(!ret)) {
- internal_error(
- true,
- "QUERY: update_every calculated to be zero on chart '%s', tier_grouping %d, update_every %d",
- st->name, st->dimensions->tiers[best_tier]->tier_grouping, st->update_every);
-
- ret = st->update_every;
- }
- }
+ // find the db minimum update every for this tier for all metrics
+ time_t common_update_every = default_rrd_update_every;
+ for(size_t i = 0, used = qt->query.used; i < used ; i++) {
+ QUERY_METRIC *qm = &qt->query.array[i];
- rrdset_unlock(st);
+ time_t update_every = qm->tiers[best_tier].db_update_every;
- return ret;
+ if(!i)
+ common_update_every = update_every;
+ else
+ common_update_every = MIN(update_every, common_update_every);
+ }
+
+ return common_update_every;
}
// ----------------------------------------------------------------------------
@@ -915,7 +902,7 @@ typedef struct query_plan {
typedef struct query_engine_ops {
// configuration
RRDR *r;
- RRDDIM *rd;
+ QUERY_METRIC *qm;
time_t view_update_every;
time_t query_granularity;
TIER_QUERY_FETCH tier_query_fetch;
@@ -927,11 +914,11 @@ typedef struct query_engine_ops {
// storage queries
size_t tier;
- struct rrddim_tier *tier_ptr;
- struct rrddim_query_handle handle;
- STORAGE_POINT (*next_metric)(struct rrddim_query_handle *handle);
- int (*is_finished)(struct rrddim_query_handle *handle);
- void (*finalize)(struct rrddim_query_handle *handle);
+ struct query_metric_tier *tier_ptr;
+ struct storage_engine_query_handle handle;
+ STORAGE_POINT (*next_metric)(struct storage_engine_query_handle *handle);
+ int (*is_finished)(struct storage_engine_query_handle *handle);
+ void (*finalize)(struct storage_engine_query_handle *handle);
// aggregating points over time
void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
@@ -963,11 +950,11 @@ static void query_planer_activate_plan(QUERY_ENGINE_OPS *ops, size_t plan_id, ti
after = overwrite_after;
ops->tier = ops->plan.data[plan_id].tier;
- ops->tier_ptr = ops->rd->tiers[ops->tier];
- ops->tier_ptr->query_ops.init(ops->tier_ptr->db_metric_handle, &ops->handle, after, before, ops->r->internal.tier_query_fetch);
- ops->next_metric = ops->tier_ptr->query_ops.next_metric;
- ops->is_finished = ops->tier_ptr->query_ops.is_finished;
- ops->finalize = ops->tier_ptr->query_ops.finalize;
+ ops->tier_ptr = &ops->qm->tiers[ops->tier];
+ ops->tier_ptr->eng->api.query_ops.init(ops->tier_ptr->db_metric_handle, &ops->handle, after, before);
+ ops->next_metric = ops->tier_ptr->eng->api.query_ops.next_metric;
+ ops->is_finished = ops->tier_ptr->eng->api.query_ops.is_finished;
+ ops->finalize = ops->tier_ptr->eng->api.query_ops.finalize;
ops->current_plan = plan_id;
ops->current_plan_expire_time = ops->plan.data[plan_id].before;
}
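query_planer_activate_plan() above wires the per-tier storage engine query callbacks into the ops structure. A minimal sketch of the read cycle those four callbacks imply (init, then is_finished/next_metric until done, then finalize), assuming the Netdata tree headers; it is illustrative only and not part of this patch:

    // illustrative only: drain one tier of one metric between 'after' and 'before'
    static size_t drain_tier(struct query_metric_tier *t, time_t after, time_t before) {
        struct storage_engine_query_handle handle;
        t->eng->api.query_ops.init(t->db_metric_handle, &handle, after, before);

        size_t points = 0;
        while(!t->eng->api.query_ops.is_finished(&handle)) {
            STORAGE_POINT sp = t->eng->api.query_ops.next_metric(&handle);
            points++;
            (void)sp;   // a real caller would feed sp into the grouping functions
        }

        t->eng->api.query_ops.finalize(&handle);
        return points;
    }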
@@ -976,26 +963,38 @@ static void query_planer_next_plan(QUERY_ENGINE_OPS *ops, time_t now, time_t las
internal_error(now < ops->current_plan_expire_time && now < ops->plan.data[ops->current_plan].before,
"QUERY: switching query plan too early!");
+ size_t old_plan = ops->current_plan;
+
time_t next_plan_before_time;
do {
ops->current_plan++;
if (ops->current_plan >= ops->plan.entries) {
- ops->current_plan = ops->plan.entries - 1;
+ ops->current_plan = old_plan;
+ ops->current_plan_expire_time = ops->r->internal.qt->window.before;
+ // let the query run with current plan
+ // we will not switch it
return;
}
next_plan_before_time = ops->plan.data[ops->current_plan].before;
} while(now >= next_plan_before_time || last_point_end_time >= next_plan_before_time);
+ if(!query_metric_is_valid_tier(ops->qm, ops->plan.data[ops->current_plan].tier)) {
+ ops->current_plan = old_plan;
+ ops->current_plan_expire_time = ops->r->internal.qt->window.before;
+ return;
+ }
+
if(ops->finalize) {
ops->finalize(&ops->handle);
ops->finalize = NULL;
+ ops->is_finished = NULL;
}
- query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));
-
// internal_error(true, "QUERY: switched plan to %zu (all is %zu), previous expiration was %ld, this starts at %ld, now is %ld, last_point_end_time %ld", ops->current_plan, ops->plan.entries, ops->plan.data[ops->current_plan-1].before, ops->plan.data[ops->current_plan].after, now, last_point_end_time);
+
+ query_planer_activate_plan(ops, ops->current_plan, MIN(now, last_point_end_time));
}
static int compare_query_plan_entries_on_start_time(const void *a, const void *b) {
@@ -1004,21 +1003,20 @@ static int compare_query_plan_entries_on_start_time(const void *a, const void *b
return (p1->after < p2->after)?-1:1;
}
-static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, long points_wanted) {
- RRDDIM *rd = ops->rd;
-
+static bool query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before_wanted, size_t points_wanted) {
//BUFFER *wb = buffer_create(1000);
//buffer_sprintf(wb, "QUERY PLAN for chart '%s' dimension '%s', from %ld to %ld:", rd->rrdset->name, rd->name, after_wanted, before_wanted);
// put our selected tier as the first plan
size_t selected_tier;
- if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER && ops->r->internal.query_tier >= 0 && ops->r->internal.query_tier < storage_tiers) {
- selected_tier = ops->r->internal.query_tier;
+ if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER
+ && ops->r->internal.qt->window.tier < storage_tiers
+ && query_metric_is_valid_tier(ops->qm, ops->r->internal.qt->window.tier)) {
+ selected_tier = ops->r->internal.qt->window.tier;
}
else {
-
- selected_tier = rrddim_find_best_tier_for_timeframe(rd, after_wanted, before_wanted, points_wanted);
+ selected_tier = query_metric_best_tier_for_timeframe(ops->qm, after_wanted, before_wanted, points_wanted);
if(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)
ops->r->internal.query_options &= ~RRDR_OPTION_SELECTED_TIER;
@@ -1026,8 +1024,8 @@ static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
ops->plan.entries = 1;
ops->plan.data[0].tier = selected_tier;
- ops->plan.data[0].after = rd->tiers[selected_tier]->query_ops.oldest_time(rd->tiers[selected_tier]->db_metric_handle);
- ops->plan.data[0].before = rd->tiers[selected_tier]->query_ops.latest_time(rd->tiers[selected_tier]->db_metric_handle);
+ ops->plan.data[0].after = ops->qm->tiers[selected_tier].db_first_time_t;
+ ops->plan.data[0].before = ops->qm->tiers[selected_tier].db_last_time_t;
if(!(ops->r->internal.query_options & RRDR_OPTION_SELECTED_TIER)) {
// the selected tier
@@ -1039,9 +1037,12 @@ static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
// check if our selected tier can start the query
if (selected_tier_first_time_t > after_wanted) {
// we need some help from other tiers
- for (int tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
+ for (size_t tr = (int)selected_tier + 1; tr < storage_tiers; tr++) {
+ if(!query_metric_is_valid_tier(ops->qm, tr))
+ continue;
+
// find the first time of this tier
- time_t first_time_t = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
+ time_t first_time_t = ops->qm->tiers[tr].db_first_time_t;
//buffer_sprintf(wb, ": EVAL AFTER tier %d, %ld", tier, first_time_t);
@@ -1067,8 +1068,11 @@ static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
if (selected_tier_last_time_t < before_wanted) {
// we need some help from other tiers
for (int tr = (int)selected_tier - 1; tr >= 0; tr--) {
+ if(!query_metric_is_valid_tier(ops->qm, tr))
+ continue;
+
// find the last time of this tier
- time_t last_time_t = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
+ time_t last_time_t = ops->qm->tiers[tr].db_last_time_t;
//buffer_sprintf(wb, ": EVAL BEFORE tier %d, %ld", tier, last_time_t);
@@ -1096,8 +1100,11 @@ static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
qsort(&ops->plan.data, ops->plan.entries, sizeof(QUERY_PLAN_ENTRY), compare_query_plan_entries_on_start_time);
// make sure it has the whole timeframe we need
- ops->plan.data[0].after = after_wanted;
- ops->plan.data[ops->plan.entries - 1].before = before_wanted;
+ if(ops->plan.data[0].after < after_wanted)
+ ops->plan.data[0].after = after_wanted;
+
+ if(ops->plan.data[ops->plan.entries - 1].before > before_wanted)
+ ops->plan.data[ops->plan.entries - 1].before = before_wanted;
//buffer_sprintf(wb, ": FINAL STEPS %zu", ops->plan.entries);
@@ -1106,7 +1113,12 @@ static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
//internal_error(true, "%s", buffer_tostring(wb));
+ if(!query_metric_is_valid_tier(ops->qm, ops->plan.data[0].tier))
+ return false;
+
query_planer_activate_plan(ops, 0, 0);
+
+ return true;
}
@@ -1146,14 +1158,17 @@ static void query_plan(QUERY_ENGINE_OPS *ops, time_t after_wanted, time_t before
(ops).group_anomaly_rate += (point).anomaly; \
} while(0)
-static inline void rrd2rrdr_do_dimension(
- RRDR *r
- , long points_wanted
- , RRDDIM *rd
- , long dim_id_in_rrdr
- , time_t after_wanted
- , time_t before_wanted
-){
+static inline void rrd2rrdr_do_dimension(RRDR *r, size_t dim_id_in_rrdr) {
+ QUERY_TARGET *qt = r->internal.qt;
+ QUERY_METRIC *qm = &qt->query.array[dim_id_in_rrdr];
+ size_t points_wanted = qt->window.points;
+ time_t after_wanted = qt->window.after;
+ time_t before_wanted = qt->window.before;
+
+// bool debug_this = false;
+// if(strcmp("user", string2str(rd->id)) == 0 && strcmp("system.cpu", string2str(rd->rrdset->id)) == 0)
+// debug_this = true;
+
time_t max_date = 0,
min_date = 0;
@@ -1161,19 +1176,20 @@ static inline void rrd2rrdr_do_dimension(
QUERY_ENGINE_OPS ops = {
.r = r,
- .rd = rd,
+ .qm = qm,
.grouping_add = r->internal.grouping_add,
.grouping_flush = r->internal.grouping_flush,
.tier_query_fetch = r->internal.tier_query_fetch,
.view_update_every = r->update_every,
- .query_granularity = r->update_every / r->group,
+ .query_granularity = (time_t)(r->update_every / r->group),
.group_value_flags = RRDR_VALUE_NOTHING
};
long rrdr_line = -1;
bool use_anomaly_bit_as_value = (r->internal.query_options & RRDR_OPTION_ANOMALY_BIT) ? true : false;
- query_plan(&ops, after_wanted, before_wanted, points_wanted);
+ if(!query_plan(&ops, after_wanted, before_wanted, points_wanted))
+ return;
NETDATA_DOUBLE min = r->min, max = r->max;
@@ -1184,15 +1200,18 @@ static inline void rrd2rrdr_do_dimension(
time_t now_start_time = after_wanted - ops.query_granularity;
time_t now_end_time = after_wanted + ops.view_update_every - ops.query_granularity;
+ size_t db_points_read_since_plan_switch = 0; (void)db_points_read_since_plan_switch;
+
// The main loop, based on the query granularity we need
- for( ; (long)points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops.view_update_every) {
+ for( ; points_added < points_wanted ; now_start_time = now_end_time, now_end_time += ops.view_update_every) {
- if(query_plan_should_switch_plan(ops, now_end_time))
+ if(unlikely(query_plan_should_switch_plan(ops, now_end_time))) {
query_planer_next_plan(&ops, now_end_time, new_point.end_time);
+ db_points_read_since_plan_switch = 0;
+ }
// read all the points of the db, prior to the time we need (now_end_time)
-
size_t count_same_end_time = 0;
while(count_same_end_time < 100) {
if(likely(count_same_end_time == 0)) {
@@ -1208,11 +1227,15 @@ static inline void rrd2rrdr_do_dimension(
new_point = QUERY_POINT_EMPTY;
new_point.start_time = last1_point.end_time;
new_point.end_time = now_end_time;
+//
+// if(debug_this) info("QUERY: is finished() returned true");
+//
break;
}
// fetch the new point
{
+ db_points_read_since_plan_switch++;
STORAGE_POINT sp = ops.next_metric(&ops.handle);
ops.db_points_read_per_tier[ops.tier]++;
@@ -1223,6 +1246,10 @@ static inline void rrd2rrdr_do_dimension(
new_point.anomaly = sp.count ? (NETDATA_DOUBLE)sp.anomaly_count * 100.0 / (NETDATA_DOUBLE)sp.count : 0.0;
query_point_set_id(new_point, ops.db_total_points_read);
+// if(debug_this)
+// info("QUERY: got point %zu, from time %ld to %ld // now from %ld to %ld // query from %ld to %ld",
+// new_point.id, new_point.start_time, new_point.end_time, now_start_time, now_end_time, after_wanted, before_wanted);
+//
// set the right value to the point we got
if(likely(!storage_point_is_unset(sp) && !storage_point_is_empty(sp))) {
@@ -1258,17 +1285,18 @@ static inline void rrd2rrdr_do_dimension(
// check if the db is giving us zero duration points
if(unlikely(new_point.start_time == new_point.end_time)) {
- internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu start time %ld, end time %ld, that are both equal",
- rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time);
+ internal_error(true, "QUERY: '%s', dimension '%s' next_metric() returned point %zu start time %ld, end time %ld, that are both equal",
+ qt->id, string2str(qm->dimension.id), new_point.id, new_point.start_time, new_point.end_time);
- new_point.start_time = new_point.end_time - ((time_t)ops.tier_ptr->tier_grouping * (time_t)ops.rd->update_every);
+ new_point.start_time = new_point.end_time - ops.tier_ptr->db_update_every;
}
// check if the db is advancing the query
if(unlikely(new_point.end_time <= last1_point.end_time)) {
- internal_error(true, "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, before the last point %zu end time %ld, now is %ld to %ld",
- rd->rrdset->name, rd->name, new_point.id, new_point.start_time, new_point.end_time,
- last1_point.id, last1_point.end_time, now_start_time, now_end_time);
+ internal_error(db_points_read_since_plan_switch > 1,
+ "QUERY: '%s', dimension '%s' next_metric() returned point %zu from %ld to %ld, before the last point %zu from %ld to %ld, now is %ld to %ld",
+ qt->id, string2str(qm->dimension.id), new_point.id, new_point.start_time, new_point.end_time,
+ last1_point.id, last1_point.start_time, last1_point.end_time, now_start_time, now_end_time);
count_same_end_time++;
continue;
@@ -1294,8 +1322,8 @@ static inline void rrd2rrdr_do_dimension(
// we only log if this is not point 1
internal_error(new_point.end_time < after_wanted && new_point.id > 1,
- "QUERY: next_metric(%s, %s) returned point %zu from %ld time %ld, which is entirely before our current timeframe %ld to %ld (and before the entire query, after %ld, before %ld)",
- rd->rrdset->name, rd->name,
+ "QUERY: '%s', dimension '%s' next_metric() returned point %zu from %ld time %ld, which is entirely before our current timeframe %ld to %ld (and before the entire query, after %ld, before %ld)",
+ qt->id, string2str(qm->dimension.id),
new_point.id, new_point.start_time, new_point.end_time,
now_start_time, now_end_time,
after_wanted, before_wanted);
@@ -1311,8 +1339,8 @@ static inline void rrd2rrdr_do_dimension(
if(unlikely(count_same_end_time)) {
internal_error(true,
- "QUERY: the database does not advance the query, it returned an end time less or equal to the end time of the last point we got %ld, %zu times",
- last1_point.end_time, count_same_end_time);
+ "QUERY: '%s', dimension '%s', the database does not advance the query, it returned an end time less or equal to the end time of the last point we got %ld, %zu times",
+ qt->id, string2str(qm->dimension.id), last1_point.end_time, count_same_end_time);
if(unlikely(new_point.end_time <= last1_point.end_time))
new_point.end_time = now_end_time;
@@ -1323,7 +1351,7 @@ static inline void rrd2rrdr_do_dimension(
// we select the one to use based on their timestamps
size_t iterations = 0;
- for ( ; now_end_time <= new_point.end_time && (long)points_added < points_wanted ;
+ for ( ; now_end_time <= new_point.end_time && points_added < points_wanted ;
now_end_time += ops.view_update_every, iterations++) {
// now_start_time is wrong in this loop
@@ -1336,22 +1364,35 @@ static inline void rrd2rrdr_do_dimension(
current_point = new_point;
query_interpolate_point(current_point, last1_point, now_end_time);
- internal_error(current_point.id > 0 && last1_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
- "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
- " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point1 in this query.",
- rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
- current_point.id, current_point.start_time, current_point.end_time, now_end_time);
+// internal_error(current_point.id > 0
+// && last1_point.id == 0
+// && current_point.end_time > after_wanted
+// && current_point.end_time > now_end_time,
+// "QUERY: '%s', dimension '%s', after %ld, before %ld, view update every %ld,"
+// " query granularity %ld, interpolating point %zu (from %ld to %ld) at %ld,"
+// " but we could really favor by having last_point1 in this query.",
+// qt->id, string2str(qm->dimension.id),
+// after_wanted, before_wanted,
+// ops.view_update_every, ops.query_granularity,
+// current_point.id, current_point.start_time, current_point.end_time,
+// now_end_time);
}
else if(likely(now_end_time <= last1_point.end_time)) {
// our LAST point is still valid
current_point = last1_point;
query_interpolate_point(current_point, last2_point, now_end_time);
- internal_error(current_point.id > 0 && last2_point.id == 0 && current_point.end_time > after_wanted && current_point.end_time > now_end_time,
- "QUERY: on '%s', dim '%s', after %ld, before %ld, view update every %ld, query granularity %ld,"
- " interpolating point %zu (from %ld to %ld) at %ld, but we could really favor by having last_point2 in this query.",
- rd->rrdset->name, rd->name, after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
- current_point.id, current_point.start_time, current_point.end_time, now_end_time);
+// internal_error(current_point.id > 0
+// && last2_point.id == 0
+// && current_point.end_time > after_wanted
+// && current_point.end_time > now_end_time,
+// "QUERY: '%s', dimension '%s', after %ld, before %ld, view update every %ld,"
+// " query granularity %ld, interpolating point %zu (from %ld to %ld) at %ld,"
+// " but we could really favor by having last_point2 in this query.",
+// qt->id, string2str(qm->dimension.id),
+// after_wanted, before_wanted, ops.view_update_every, ops.query_granularity,
+// current_point.id, current_point.start_time, current_point.end_time,
+// now_end_time);
}
else {
// a GAP, we don't have a value this time
@@ -1414,7 +1455,7 @@ static inline void rrd2rrdr_do_dimension(
r->internal.result_points_generated += points_added;
r->internal.db_points_read += ops.db_total_points_read;
- for(int tr = 0; tr < storage_tiers ; tr++)
+ for(size_t tr = 0; tr < storage_tiers ; tr++)
r->internal.tier_points_read[tr] += ops.db_points_read_per_tier[tr];
r->min = min;
@@ -1423,24 +1464,26 @@ static inline void rrd2rrdr_do_dimension(
r->after = min_date - ops.view_update_every + ops.query_granularity;
rrdr_done(r, rrdr_line);
- internal_error((long)points_added != points_wanted,
- "QUERY: query on %s/%s requested %zu points, but RRDR added %zu (%zu db points read).",
- r->st->name, rd->name, (size_t)points_wanted, (size_t)points_added, ops.db_total_points_read);
+ internal_error(points_added != points_wanted,
+ "QUERY: '%s', dimension '%s', requested %zu points, but RRDR added %zu (%zu db points read).",
+ qt->id, string2str(qm->dimension.id),
+ (size_t)points_wanted, (size_t)points_added, ops.db_total_points_read);
}
// ----------------------------------------------------------------------------
// fill the gap of a tier
-extern void store_metric_at_tier(RRDDIM *rd, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
+void store_metric_at_tier(RRDDIM *rd, size_t tier, struct rrddim_tier *t, STORAGE_POINT sp, usec_t now_ut);
+void store_metric_collection_completed(void);
-void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, int tier, time_t now) {
- if(unlikely(tier < 0 || tier >= storage_tiers)) return;
+void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, size_t tier, time_t now) {
+ if(unlikely(tier >= storage_tiers)) return;
if(storage_tiers_backfill[tier] == RRD_BACKFILL_NONE) return;
struct rrddim_tier *t = rd->tiers[tier];
if(unlikely(!t)) return;
- time_t latest_time_t = t->query_ops.latest_time(t->db_metric_handle);
+ time_t latest_time_t = t->query_ops->latest_time(t->db_metric_handle);
time_t granularity = (time_t)t->tier_grouping * (time_t)rd->update_every;
time_t time_diff = now - latest_time_t;
@@ -1450,43 +1493,40 @@ void rrdr_fill_tier_gap_from_smaller_tiers(RRDDIM *rd, int tier, time_t now) {
// there is really nothing we can do
if(now <= latest_time_t || time_diff < granularity) return;
- struct rrddim_query_handle handle;
-
- size_t all_points_read = 0;
+ struct storage_engine_query_handle handle;
// for each lower tier
- for(int tr = tier - 1; tr >= 0 ;tr--){
- time_t smaller_tier_first_time = rd->tiers[tr]->query_ops.oldest_time(rd->tiers[tr]->db_metric_handle);
- time_t smaller_tier_last_time = rd->tiers[tr]->query_ops.latest_time(rd->tiers[tr]->db_metric_handle);
+ for(int tr = (int)tier - 1; tr >= 0 ;tr--){
+ time_t smaller_tier_first_time = rd->tiers[tr]->query_ops->oldest_time(rd->tiers[tr]->db_metric_handle);
+ time_t smaller_tier_last_time = rd->tiers[tr]->query_ops->latest_time(rd->tiers[tr]->db_metric_handle);
if(smaller_tier_last_time <= latest_time_t) continue; // it is as bad as we are
long after_wanted = (latest_time_t < smaller_tier_first_time) ? smaller_tier_first_time : latest_time_t;
long before_wanted = smaller_tier_last_time;
struct rrddim_tier *tmp = rd->tiers[tr];
- tmp->query_ops.init(tmp->db_metric_handle, &handle, after_wanted, before_wanted, TIER_QUERY_FETCH_AVERAGE);
+ tmp->query_ops->init(tmp->db_metric_handle, &handle, after_wanted, before_wanted);
- size_t points = 0;
+ size_t points_read = 0;
- while(!tmp->query_ops.is_finished(&handle)) {
+ while(!tmp->query_ops->is_finished(&handle)) {
- STORAGE_POINT sp = tmp->query_ops.next_metric(&handle);
+ STORAGE_POINT sp = tmp->query_ops->next_metric(&handle);
+ points_read++;
if(sp.end_time > latest_time_t) {
latest_time_t = sp.end_time;
- store_metric_at_tier(rd, t, sp, sp.end_time * USEC_PER_SEC);
- points++;
+ store_metric_at_tier(rd, tr, t, sp, sp.end_time * USEC_PER_SEC);
}
}
- all_points_read += points;
- tmp->query_ops.finalize(&handle);
+ tmp->query_ops->finalize(&handle);
+ store_metric_collection_completed();
+ global_statistics_backfill_query_completed(points_read);
//internal_error(true, "DBENGINE: backfilled chart '%s', dimension '%s', tier %d, from %ld to %ld, with %zu points from tier %d",
// rd->rrdset->name, rd->name, tier, after_wanted, before_wanted, points, tr);
}
-
- rrdr_query_completed(all_points_read, all_points_read);
}
// ----------------------------------------------------------------------------
@@ -1497,29 +1537,33 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
, RRDR_OPTIONS options __maybe_unused
, RRDR_GROUPING group_method
, bool aligned
- , long group
- , long resampling_time
- , long resampling_group
+ , size_t group
+ , time_t resampling_time
+ , size_t resampling_group
, time_t after_wanted
, time_t after_requested
, time_t before_wanted
, time_t before_requested
- , long points_requested
- , long points_wanted
+ , size_t points_requested
+ , size_t points_wanted
//, size_t after_slot
//, size_t before_slot
, const char *msg
) {
- netdata_rwlock_rdlock(&r->st->rrdset_rwlock);
- info("INTERNAL ERROR: rrd2rrdr() on %s update every %d with %s grouping %s (group: %ld, resampling_time: %ld, resampling_group: %ld), "
- "after (got: %zu, want: %zu, req: %ld, db: %zu), "
- "before (got: %zu, want: %zu, req: %ld, db: %zu), "
- "duration (got: %zu, want: %zu, req: %ld, db: %zu), "
- //"slot (after: %zu, before: %zu, delta: %zu), "
- "points (got: %ld, want: %ld, req: %ld, db: %ld), "
+
+ time_t first_entry_t = r->internal.qt->db.first_time_t;
+ time_t last_entry_t = r->internal.qt->db.last_time_t;
+
+ internal_error(
+ true,
+ "rrd2rrdr() on %s update every %ld with %s grouping %s (group: %zu, resampling_time: %ld, resampling_group: %zu), "
+ "after (got: %ld, want: %ld, req: %ld, db: %ld), "
+ "before (got: %ld, want: %ld, req: %ld, db: %ld), "
+ "duration (got: %ld, want: %ld, req: %ld, db: %ld), "
+ "points (got: %zu, want: %zu, req: %zu), "
"%s"
- , r->st->name
- , r->st->update_every
+ , r->internal.qt->id
+ , r->internal.qt->window.query_granularity
// grouping
, (aligned) ? "aligned" : "unaligned"
@@ -1529,45 +1573,36 @@ static void rrd2rrdr_log_request_response_metadata(RRDR *r
, resampling_group
// after
- , (size_t)r->after
- , (size_t)after_wanted
+ , r->after
+ , after_wanted
, after_requested
- , (size_t)rrdset_first_entry_t_nolock(r->st)
+ , first_entry_t
// before
- , (size_t)r->before
- , (size_t)before_wanted
+ , r->before
+ , before_wanted
, before_requested
- , (size_t)rrdset_last_entry_t_nolock(r->st)
+ , last_entry_t
// duration
- , (size_t)(r->before - r->after + r->st->update_every)
- , (size_t)(before_wanted - after_wanted + r->st->update_every)
- , before_requested - after_requested
- , (size_t)((rrdset_last_entry_t_nolock(r->st) - rrdset_first_entry_t_nolock(r->st)) + r->st->update_every)
-
- // slot
- /*
- , after_slot
- , before_slot
- , (after_slot > before_slot) ? (r->st->entries - after_slot + before_slot) : (before_slot - after_slot)
- */
+ , (long)(r->before - r->after + r->internal.qt->window.query_granularity)
+ , (long)(before_wanted - after_wanted + r->internal.qt->window.query_granularity)
+ , (long)before_requested - after_requested
+ , (long)((last_entry_t - first_entry_t) + r->internal.qt->window.query_granularity)
// points
, r->rows
, points_wanted
, points_requested
- , r->st->entries
// message
, msg
);
- netdata_rwlock_unlock(&r->st->rrdset_rwlock);
}
#endif // NETDATA_INTERNAL_CHECKS
// Returns 1 if an absolute period was requested or 0 if it was a relative period
-int rrdr_relative_window_to_absolute(long long *after, long long *before) {
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before) {
time_t now = now_realtime_sec() - 1;
int absolute_period_requested = -1;
@@ -1624,10 +1659,25 @@ int rrdr_relative_window_to_absolute(long long *after, long long *before) {
after_requested -= delta;
}
+ time_t absolute_minimum_time = now - (10 * 365 * 86400);
+ time_t absolute_maximum_time = now + (1 * 365 * 86400);
+
+ if (after_requested < absolute_minimum_time && !unittest_running)
+ after_requested = absolute_minimum_time;
+
+ if (after_requested > absolute_maximum_time && !unittest_running)
+ after_requested = absolute_maximum_time;
+
+ if (before_requested < absolute_minimum_time && !unittest_running)
+ before_requested = absolute_minimum_time;
+
+ if (before_requested > absolute_maximum_time && !unittest_running)
+ before_requested = absolute_maximum_time;
+
*before = before_requested;
*after = after_requested;
- return absolute_period_requested;
+ return (absolute_period_requested != 1);
}
// #define DEBUG_QUERY_LOGIC 1
@@ -1636,7 +1686,7 @@ int rrdr_relative_window_to_absolute(long long *after, long long *before) {
#define query_debug_log_init() BUFFER *debug_log = buffer_create(1000)
#define query_debug_log(args...) buffer_sprintf(debug_log, ##args)
#define query_debug_log_fin() { \
- info("QUERY: chart '%s', after:%lld, before:%lld, duration:%lld, points:%ld, res:%ld - wanted => after:%lld, before:%lld, points:%ld, group:%ld, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", st->name, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
+ info("QUERY: '%s', after:%ld, before:%ld, duration:%ld, points:%zu, res:%ld - wanted => after:%ld, before:%ld, points:%zu, group:%zu, granularity:%ld, resgroup:%ld, resdiv:" NETDATA_DOUBLE_FORMAT_AUTO " %s", qt->id, after_requested, before_requested, before_requested - after_requested, points_requested, resampling_time_requested, after_wanted, before_wanted, points_wanted, group, query_granularity, resampling_group, resampling_divisor, buffer_tostring(debug_log)); \
buffer_free(debug_log); \
debug_log = NULL; \
}
@@ -1648,21 +1698,18 @@ int rrdr_relative_window_to_absolute(long long *after, long long *before) {
#define query_debug_log_free() debug_dummy()
#endif
-RRDR *rrd2rrdr(
- ONEWAYALLOC *owa
- , RRDSET *st
- , long points_requested
- , long long after_requested
- , long long before_requested
- , RRDR_GROUPING group_method
- , long resampling_time_requested
- , RRDR_OPTIONS options
- , const char *dimensions
- , struct context_param *context_param_list
- , const char *group_options
- , int timeout
- , int tier
-) {
+bool query_target_calculate_window(QUERY_TARGET *qt) {
+ if (unlikely(!qt)) return false;
+
+ size_t points_requested = (long)qt->request.points;
+ time_t after_requested = qt->request.after;
+ time_t before_requested = qt->request.before;
+ RRDR_GROUPING group_method = qt->request.group_method;
+ time_t resampling_time_requested = qt->request.resampling_time;
+ RRDR_OPTIONS options = qt->request.options;
+ size_t tier = qt->request.tier;
+ time_t update_every = qt->db.minimum_latest_update_every;
+
// RULES
// points_requested = 0
// the user wants all the natural points the database has
@@ -1676,10 +1723,9 @@ RRDR *rrd2rrdr(
// when natural points are wanted, the query has to be aligned to the update_every
// of the database
- long points_wanted = points_requested;
- long long after_wanted = after_requested;
- long long before_wanted = before_requested;
- int update_every = st->update_every;
+ size_t points_wanted = points_requested;
+ time_t after_wanted = after_requested;
+ time_t before_wanted = before_requested;
bool aligned = !(options & RRDR_OPTION_NOT_ALIGNED);
bool automatic_natural_points = (points_wanted == 0);
@@ -1689,13 +1735,7 @@ RRDR *rrd2rrdr(
query_debug_log_init();
- // make sure points_wanted is positive
- if(points_wanted < 0) {
- points_wanted = -points_wanted;
- query_debug_log(":-points_wanted %ld", points_wanted);
- }
-
- if(ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
+ if (ABS(before_requested) <= API_RELATIVE_TIME_MAX || ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
relative_period_requested = true;
natural_points = true;
options |= RRDR_OPTION_NATURAL_POINTS;
@@ -1703,105 +1743,93 @@ RRDR *rrd2rrdr(
}
// if the user wants virtual points, make sure we do it
- if(options & RRDR_OPTION_VIRTUAL_POINTS)
+ if (options & RRDR_OPTION_VIRTUAL_POINTS)
natural_points = false;
// set the right flag about natural and virtual points
- if(natural_points) {
+ if (natural_points) {
options |= RRDR_OPTION_NATURAL_POINTS;
- if(options & RRDR_OPTION_VIRTUAL_POINTS)
+ if (options & RRDR_OPTION_VIRTUAL_POINTS)
options &= ~RRDR_OPTION_VIRTUAL_POINTS;
}
else {
options |= RRDR_OPTION_VIRTUAL_POINTS;
- if(options & RRDR_OPTION_NATURAL_POINTS)
+ if (options & RRDR_OPTION_NATURAL_POINTS)
options &= ~RRDR_OPTION_NATURAL_POINTS;
}
- if(after_wanted == 0 || before_wanted == 0) {
- // for non-context queries we have to find the duration of the database
- // for context queries we will assume 600 seconds duration
-
- if(!context_param_list) {
- relative_period_requested = true;
-
- rrdset_rdlock(st);
- time_t first_entry_t = rrdset_first_entry_t_nolock(st);
- time_t last_entry_t = rrdset_last_entry_t_nolock(st);
- rrdset_unlock(st);
-
- if(first_entry_t == 0 || last_entry_t == 0) {
- internal_error(true, "QUERY: chart without data detected on '%s'", st->name);
- query_debug_log_free();
- return NULL;
- }
+ if (after_wanted == 0 || before_wanted == 0) {
+ relative_period_requested = true;
- query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_t, last_entry_t);
+ time_t first_entry_t = qt->db.first_time_t;
+ time_t last_entry_t = qt->db.last_time_t;
- if (after_wanted == 0) {
- after_wanted = first_entry_t;
- query_debug_log(":zero after_wanted %lld", after_wanted);
- }
+ if (first_entry_t == 0 || last_entry_t == 0) {
+ internal_error(true, "QUERY: no data detected on query '%s' (db first_entry_t = %ld, last_entry_t = %ld", qt->id, first_entry_t, last_entry_t);
+ query_debug_log_free();
+ return false;
+ }
- if (before_wanted == 0) {
- before_wanted = last_entry_t;
- before_is_aligned_to_db_end = true;
- query_debug_log(":zero before_wanted %lld", before_wanted);
- }
+ query_debug_log(":first_entry_t %ld, last_entry_t %ld", first_entry_t, last_entry_t);
- if(points_wanted == 0) {
- points_wanted = (last_entry_t - first_entry_t) / update_every;
- query_debug_log(":zero points_wanted %ld", points_wanted);
- }
+ if (after_wanted == 0) {
+ after_wanted = first_entry_t;
+ query_debug_log(":zero after_wanted %ld", after_wanted);
}
- // if they are still zero, assume 600
+ if (before_wanted == 0) {
+ before_wanted = last_entry_t;
+ before_is_aligned_to_db_end = true;
+ query_debug_log(":zero before_wanted %ld", before_wanted);
+ }
- if(after_wanted == 0) {
- after_wanted = -600;
- query_debug_log(":zero600 after_wanted %lld", after_wanted);
+ if (points_wanted == 0) {
+ points_wanted = (last_entry_t - first_entry_t) / update_every;
+ query_debug_log(":zero points_wanted %zu", points_wanted);
}
}
- if(points_wanted == 0) {
+ if (points_wanted == 0) {
points_wanted = 600;
- query_debug_log(":zero600 points_wanted %ld", points_wanted);
+ query_debug_log(":zero600 points_wanted %zu", points_wanted);
}
// convert our before_wanted and after_wanted to absolute
rrdr_relative_window_to_absolute(&after_wanted, &before_wanted);
- query_debug_log(":relative2absolute after %lld, before %lld", after_wanted, before_wanted);
+ query_debug_log(":relative2absolute after %ld, before %ld", after_wanted, before_wanted);
- if(natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
- update_every = rrdset_find_natural_update_every_for_timeframe(st, after_wanted, before_wanted, points_wanted, options, tier);
- if(update_every <= 0) update_every = st->update_every;
- query_debug_log(":natural update every %d", update_every);
+ if (natural_points && (options & RRDR_OPTION_SELECTED_TIER) && tier > 0 && storage_tiers > 1) {
+ update_every = rrdset_find_natural_update_every_for_timeframe(
+ qt, after_wanted, before_wanted, points_wanted, options, tier);
+
+ if (update_every <= 0) update_every = qt->db.minimum_latest_update_every;
+ query_debug_log(":natural update every %ld", update_every);
}
// this is the update_every of the query
// it may be different to the update_every of the database
- time_t query_granularity = (natural_points)?update_every:1;
- if(query_granularity <= 0) query_granularity = 1;
+ time_t query_granularity = (natural_points) ? update_every : 1;
+ if (query_granularity <= 0) query_granularity = 1;
query_debug_log(":query_granularity %ld", query_granularity);
// align before_wanted and after_wanted to query_granularity
if (before_wanted % query_granularity) {
before_wanted -= before_wanted % query_granularity;
- query_debug_log(":granularity align before_wanted %lld", before_wanted);
+ query_debug_log(":granularity align before_wanted %ld", before_wanted);
}
if (after_wanted % query_granularity) {
after_wanted -= after_wanted % query_granularity;
- query_debug_log(":granularity align after_wanted %lld", after_wanted);
+ query_debug_log(":granularity align after_wanted %ld", after_wanted);
}
// automatic_natural_points is set when the user wants all the points available in the database
- if(automatic_natural_points) {
+ if (automatic_natural_points) {
points_wanted = (before_wanted - after_wanted + 1) / query_granularity;
- if(unlikely(points_wanted <= 0)) points_wanted = 1;
- query_debug_log(":auto natural points_wanted %ld", points_wanted);
+ if (unlikely(points_wanted <= 0)) points_wanted = 1;
+ query_debug_log(":auto natural points_wanted %zu", points_wanted);
}
time_t duration = before_wanted - after_wanted;
@@ -1810,42 +1838,47 @@ RRDR *rrd2rrdr(
if (unlikely(resampling_time_requested > duration)) {
after_wanted = before_wanted - resampling_time_requested;
duration = before_wanted - after_wanted;
- query_debug_log(":resampling after_wanted %lld", after_wanted);
+ query_debug_log(":resampling after_wanted %ld", after_wanted);
}
// if the duration is not aligned to resampling time
// extend the duration to the past, to avoid a gap at the chart
// only when the missing duration is above 1/10th of a point
- if(resampling_time_requested > query_granularity && duration % resampling_time_requested) {
+ if (resampling_time_requested > query_granularity && duration % resampling_time_requested) {
time_t delta = duration % resampling_time_requested;
- if(delta > resampling_time_requested / 10) {
+ if (delta > resampling_time_requested / 10) {
after_wanted -= resampling_time_requested - delta;
duration = before_wanted - after_wanted;
- query_debug_log(":resampling2 after_wanted %lld", after_wanted);
+ query_debug_log(":resampling2 after_wanted %ld", after_wanted);
}
}
// the available points of the query
- long points_available = (duration + 1) / query_granularity;
- if(unlikely(points_available <= 0)) points_available = 1;
- query_debug_log(":points_available %ld", points_available);
+ size_t points_available = (duration + 1) / query_granularity;
+ if (unlikely(points_available <= 0)) points_available = 1;
+ query_debug_log(":points_available %zu", points_available);
- if(points_wanted > points_available) {
+ if (points_wanted > points_available) {
points_wanted = points_available;
- query_debug_log(":max points_wanted %ld", points_wanted);
+ query_debug_log(":max points_wanted %zu", points_wanted);
+ }
+
+ if(points_wanted > 86400 && !unittest_running) {
+ points_wanted = 86400;
+ query_debug_log(":absolute max points_wanted %zu", points_wanted);
}
// calculate the desired grouping of source data points
- long group = points_available / points_wanted;
- if(group <= 0) group = 1;
+ size_t group = points_available / points_wanted;
+ if (group == 0) group = 1;
// round "group" to the closest integer
- if(points_available % points_wanted > points_wanted / 2)
+ if (points_available % points_wanted > points_wanted / 2)
group++;
- query_debug_log(":group %ld", group);
+ query_debug_log(":group %zu", group);
- if(points_wanted * group * query_granularity < duration) {
+ if (points_wanted * group * query_granularity < (size_t)duration) {
// the grouping we are going to do, is not enough
// to cover the entire duration requested, so
// we have to change the number of points, to make sure we will
@@ -1854,162 +1887,186 @@ RRDR *rrd2rrdr(
// let's see how many points are the optimal
points_wanted = points_available / group;
- if(points_wanted * group < points_available)
+ if (points_wanted * group < points_available)
points_wanted++;
- if(unlikely(points_wanted <= 0))
+ if (unlikely(points_wanted == 0))
points_wanted = 1;
- query_debug_log(":optimal points %ld", points_wanted);
+ query_debug_log(":optimal points %zu", points_wanted);
}
// resampling_time_requested enforces a certain grouping multiple
NETDATA_DOUBLE resampling_divisor = 1.0;
- long resampling_group = 1;
- if(unlikely(resampling_time_requested > query_granularity)) {
+ size_t resampling_group = 1;
+ if (unlikely(resampling_time_requested > query_granularity)) {
// the points we should group to satisfy gtime
resampling_group = resampling_time_requested / query_granularity;
- if(unlikely(resampling_time_requested % query_granularity))
+ if (unlikely(resampling_time_requested % query_granularity))
resampling_group++;
- query_debug_log(":resampling group %ld", resampling_group);
+ query_debug_log(":resampling group %zu", resampling_group);
// adapt group according to resampling_group
- if(unlikely(group < resampling_group)) {
- group = resampling_group; // do not allow grouping below the desired one
- query_debug_log(":group less res %ld", group);
+ if (unlikely(group < resampling_group)) {
+ group = resampling_group; // do not allow grouping below the desired one
+ query_debug_log(":group less res %zu", group);
}
- if(unlikely(group % resampling_group)) {
+ if (unlikely(group % resampling_group)) {
group += resampling_group - (group % resampling_group); // make sure group is multiple of resampling_group
- query_debug_log(":group mod res %ld", group);
+ query_debug_log(":group mod res %zu", group);
}
// resampling_divisor = group / resampling_group;
- resampling_divisor = (NETDATA_DOUBLE)(group * query_granularity) / (NETDATA_DOUBLE)resampling_time_requested;
+ resampling_divisor = (NETDATA_DOUBLE) (group * query_granularity) / (NETDATA_DOUBLE) resampling_time_requested;
query_debug_log(":resampling divisor " NETDATA_DOUBLE_FORMAT, resampling_divisor);
}
// now that we have group, align the requested timeframe to fit it.
- if(aligned && before_wanted % (group * query_granularity)) {
- if(before_is_aligned_to_db_end)
- before_wanted -= before_wanted % (group * query_granularity);
+ if (aligned && before_wanted % (group * query_granularity)) {
+ if (before_is_aligned_to_db_end)
+ before_wanted -= before_wanted % (time_t)(group * query_granularity);
else
- before_wanted += (group * query_granularity) - before_wanted % (group * query_granularity);
- query_debug_log(":align before_wanted %lld", before_wanted);
+ before_wanted += (time_t)(group * query_granularity) - before_wanted % (time_t)(group * query_granularity);
+ query_debug_log(":align before_wanted %ld", before_wanted);
}
- after_wanted = before_wanted - (points_wanted * group * query_granularity) + query_granularity;
- query_debug_log(":final after_wanted %lld", after_wanted);
+ after_wanted = before_wanted - (time_t)(points_wanted * group * query_granularity) + query_granularity;
+ query_debug_log(":final after_wanted %ld", after_wanted);
duration = before_wanted - after_wanted;
query_debug_log(":final duration %ld", duration + 1);
- // check the context query based on the starting time of the query
- if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
- rebuild_context_param_list(owa, context_param_list, after_wanted);
- st = context_param_list->rd ? context_param_list->rd->rrdset : NULL;
-
- if(unlikely(!st))
- return NULL;
- }
+ query_debug_log_fin();
internal_error(points_wanted != duration / (query_granularity * group) + 1,
- "QUERY: points_wanted %ld is not points %ld",
- points_wanted, duration / (query_granularity * group) + 1);
+ "QUERY: points_wanted %zu is not points %zu",
+ points_wanted, (size_t)(duration / (query_granularity * group) + 1));
internal_error(group < resampling_group,
- "QUERY: group %ld is less than the desired group points %ld",
+ "QUERY: group %zu is less than the desired group points %zu",
group, resampling_group);
internal_error(group > resampling_group && group % resampling_group,
- "QUERY: group %ld is not a multiple of the desired group points %ld",
+ "QUERY: group %zu is not a multiple of the desired group points %zu",
group, resampling_group);
// -------------------------------------------------------------------------
- // initialize our result set
- // this also locks the chart for us
+ // update QUERY_TARGET with our calculations
+
+ qt->window.after = after_wanted;
+ qt->window.before = before_wanted;
+ qt->window.relative = relative_period_requested;
+ qt->window.points = points_wanted;
+ qt->window.group = group;
+ qt->window.group_method = group_method;
+ qt->window.group_options = qt->request.group_options;
+ qt->window.query_granularity = query_granularity;
+ qt->window.resampling_group = resampling_group;
+ qt->window.resampling_divisor = resampling_divisor;
+ qt->window.options = options;
+ qt->window.tier = tier;
+ qt->window.aligned = aligned;
+
+ return true;
+}
+
+RRDR *rrd2rrdr_legacy(
+ ONEWAYALLOC *owa,
+ RRDSET *st, size_t points, time_t after, time_t before,
+ RRDR_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
+ const char *group_options, time_t timeout, size_t tier, QUERY_SOURCE query_source) {
+
+ QUERY_TARGET_REQUEST qtr = {
+ .st = st,
+ .points = points,
+ .after = after,
+ .before = before,
+ .group_method = group_method,
+ .resampling_time = resampling_time,
+ .options = options,
+ .dimensions = dimensions,
+ .group_options = group_options,
+ .timeout = timeout,
+ .tier = tier,
+ .query_source = query_source,
+ };
+
+ return rrd2rrdr(owa, query_target_create(&qtr));
+}
- RRDR *r = rrdr_create(owa, st, points_wanted, context_param_list);
+RRDR *rrd2rrdr(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
+ if(!qt)
+ return NULL;
+
+ if(!owa) {
+ query_target_release(qt);
+ return NULL;
+ }
+
+ // qt.window members are the WANTED ones.
+ // qt.request members are the REQUESTED ones.
+
+ RRDR *r = rrdr_create(owa, qt);
if(unlikely(!r)) {
- internal_error(true, "QUERY: cannot create RRDR for %s, after=%u, before=%u, duration=%u, points=%ld",
- st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (uint32_t)duration, points_wanted);
+ internal_error(true, "QUERY: cannot create RRDR for %s, after=%ld, before=%ld, points=%zu",
+ qt->id, qt->window.after, qt->window.before, qt->window.points);
return NULL;
}
- if(unlikely(!r->d || !points_wanted)) {
- internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%u, before=%u, duration=%zu, points=%ld",
- st->id, (uint32_t)after_wanted, (uint32_t)before_wanted, (size_t)duration, points_wanted);
+ if(unlikely(!r->d || !qt->window.points)) {
+ internal_error(true, "QUERY: returning empty RRDR (no dimensions in RRDSET) for %s, after=%ld, before=%ld, points=%zu",
+ qt->id, qt->window.after, qt->window.before, qt->window.points);
return r;
}
- if(relative_period_requested)
+ if(qt->window.relative)
r->result_options |= RRDR_RESULT_OPTION_RELATIVE;
else
r->result_options |= RRDR_RESULT_OPTION_ABSOLUTE;
- // find how many dimensions we have
- long dimensions_count = r->d;
-
// -------------------------------------------------------------------------
// initialize RRDR
- r->group = group;
- r->update_every = (int)(group * query_granularity);
- r->before = before_wanted;
- r->after = after_wanted;
- r->internal.points_wanted = points_wanted;
- r->internal.resampling_group = resampling_group;
- r->internal.resampling_divisor = resampling_divisor;
- r->internal.query_options = options;
- r->internal.query_tier = tier;
+ r->group = qt->window.group;
+ r->update_every = (int) (qt->window.group * qt->window.query_granularity);
+ r->before = qt->window.before;
+ r->after = qt->window.after;
+ r->internal.points_wanted = qt->window.points;
+ r->internal.resampling_group = qt->window.resampling_group;
+ r->internal.resampling_divisor = qt->window.resampling_divisor;
+ r->internal.query_options = qt->window.options;
// -------------------------------------------------------------------------
// assign the processor functions
- rrdr_set_grouping_function(r, group_method);
+ rrdr_set_grouping_function(r, qt->window.group_method);
// allocate any memory required by the grouping method
- r->internal.grouping_create(r, group_options);
-
-
- // -------------------------------------------------------------------------
- // disable the not-wanted dimensions
-
- if (context_param_list && !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE))
- rrdset_check_rdlock(st);
-
- if(dimensions)
- rrdr_disable_not_selected_dimensions(r, options, dimensions, context_param_list);
-
-
- query_debug_log_fin();
+ r->internal.grouping_create(r, qt->window.group_options);
// -------------------------------------------------------------------------
// do the work for each dimension
time_t max_after = 0, min_before = 0;
- long max_rows = 0;
+ size_t max_rows = 0;
- RRDDIM *first_rd = context_param_list ? context_param_list->rd : st->dimensions;
- RRDDIM *rd;
- long c, dimensions_used = 0, dimensions_nonzero = 0;
+ long dimensions_used = 0, dimensions_nonzero = 0;
struct timeval query_start_time;
struct timeval query_current_time;
- if (timeout) now_realtime_timeval(&query_start_time);
+ if (qt->request.timeout)
+ now_realtime_timeval(&query_start_time);
- for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ for(size_t c = 0, max = qt->query.used; c < max ; c++) {
+ // set the query target dimension options to rrdr
+ r->od[c] = qt->query.array[c].dimension.options;
- // if we need a percentage, we need to calculate all dimensions
- if(unlikely(!(options & RRDR_OPTION_PERCENTAGE) && (r->od[c] & RRDR_DIMENSION_HIDDEN))) {
- if(unlikely(r->od[c] & RRDR_DIMENSION_SELECTED)) r->od[c] &= ~RRDR_DIMENSION_SELECTED;
- continue;
- }
r->od[c] |= RRDR_DIMENSION_SELECTED;
// reset the grouping for the new dimension
r->internal.grouping_reset(r);
- rrd2rrdr_do_dimension(r, points_wanted, rd, c, after_wanted, before_wanted);
- if (timeout)
+ rrd2rrdr_do_dimension(r, c);
+ if (qt->request.timeout)
now_realtime_timeval(&query_current_time);
if(r->od[c] & RRDR_DIMENSION_NONZERO)
@@ -2024,30 +2081,30 @@ RRDR *rrd2rrdr(
else {
if(r->after != max_after) {
internal_error(true, "QUERY: 'after' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_after, rd->name, (size_t)r->after);
+ string2str(qt->query.array[c].dimension.id), (size_t)max_after, string2str(qt->query.array[c].dimension.name), (size_t)r->after);
r->after = (r->after > max_after) ? r->after : max_after;
}
if(r->before != min_before) {
internal_error(true, "QUERY: 'before' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)min_before, rd->name, (size_t)r->before);
+ string2str(qt->query.array[c].dimension.id), (size_t)min_before, string2str(qt->query.array[c].dimension.name), (size_t)r->before);
r->before = (r->before < min_before) ? r->before : min_before;
}
if(r->rows != max_rows) {
internal_error(true, "QUERY: 'rows' mismatch between dimensions for chart '%s': max is %zu, dimension '%s' has %zu",
- st->name, (size_t)max_rows, rd->name, (size_t)r->rows);
+ string2str(qt->query.array[c].dimension.id), (size_t)max_rows, string2str(qt->query.array[c].dimension.name), (size_t)r->rows);
r->rows = (r->rows > max_rows) ? r->rows : max_rows;
}
}
dimensions_used++;
- if (timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > timeout) {
- log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %d ms)",
- (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, timeout);
+ if (qt->request.timeout && ((NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0) > (NETDATA_DOUBLE)qt->request.timeout) {
+ log_access("QUERY CANCELED RUNTIME EXCEEDED %0.2f ms (LIMIT %lld ms)",
+ (NETDATA_DOUBLE)dt_usec(&query_start_time, &query_current_time) / 1000.0, (long long)qt->request.timeout);
r->result_options |= RRDR_RESULT_OPTION_CANCEL;
break;
}
@@ -2056,44 +2113,44 @@ RRDR *rrd2rrdr(
#ifdef NETDATA_INTERNAL_CHECKS
if (dimensions_used) {
if(r->internal.log)
- rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
- after_wanted, after_requested, before_wanted, before_requested,
- points_requested, points_wanted, /*after_slot, before_slot,*/
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
r->internal.log);
- if(r->rows != points_wanted)
- rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
- after_wanted, after_requested, before_wanted, before_requested,
- points_requested, points_wanted, /*after_slot, before_slot,*/
+ if(r->rows != qt->window.points)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"got 'points' is not wanted 'points'");
- if(aligned && (r->before % (group * query_granularity)) != 0)
- rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
- after_wanted, after_requested, before_wanted,before_wanted,
- points_requested, points_wanted, /*after_slot, before_slot,*/
+ if(qt->window.aligned && (r->before % (qt->window.group * qt->window.query_granularity)) != 0)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before,qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"'before' is not aligned but alignment is required");
// 'after' should not be aligned, since we start inside the first group
- //if(aligned && (r->after % group) != 0)
- // rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group, after_wanted, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
+ //if(qt->window.aligned && (r->after % group) != 0)
+ // rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group, qt->window.after, after_requested, before_wanted, before_requested, points_requested, points_wanted, after_slot, before_slot, "'after' is not aligned but alignment is required");
- if(r->before != before_wanted)
- rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
- after_wanted, after_requested, before_wanted, before_requested,
- points_requested, points_wanted, /*after_slot, before_slot,*/
+ if(r->before != qt->window.before)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"chart is not aligned to requested 'before'");
- if(r->before != before_wanted)
- rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
- after_wanted, after_requested, before_wanted, before_requested,
- points_requested, points_wanted, /*after_slot, before_slot,*/
+ if(r->before != qt->window.before)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"got 'before' is not wanted 'before'");
// reported 'after' varies, depending on group
- if(r->after != after_wanted)
- rrd2rrdr_log_request_response_metadata(r, options, group_method, aligned, group, resampling_time_requested, resampling_group,
- after_wanted, after_requested, before_wanted, before_requested,
- points_requested, points_wanted, /*after_slot, before_slot,*/
+ if(r->after != qt->window.after)
+ rrd2rrdr_log_request_response_metadata(r, qt->window.options, qt->window.group_method, qt->window.aligned, qt->window.group, qt->request.resampling_time, qt->window.resampling_group,
+ qt->window.after, qt->request.after, qt->window.before, qt->request.before,
+ qt->request.points, qt->window.points, /*after_slot, before_slot,*/
"got 'after' is not wanted 'after'");
}
@@ -2103,15 +2160,16 @@ RRDR *rrd2rrdr(
r->internal.grouping_free(r);
// when all the dimensions are zero, we should return all of them
- if(unlikely(options & RRDR_OPTION_NONZERO && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
+ if(unlikely((qt->window.options & RRDR_OPTION_NONZERO) && !dimensions_nonzero && !(r->result_options & RRDR_RESULT_OPTION_CANCEL))) {
// all the dimensions are zero
// mark them as NONZERO to send them all
- for(rd = first_rd, c = 0 ; rd && c < dimensions_count ; rd = rd->next, c++) {
+ for(size_t c = 0, max = qt->query.used; c < max ; c++) {
if(unlikely(r->od[c] & RRDR_DIMENSION_HIDDEN)) continue;
r->od[c] |= RRDR_DIMENSION_NONZERO;
}
}
- rrdr_query_completed(r->internal.db_points_read, r->internal.result_points_generated);
+ global_statistics_rrdr_query_completed(dimensions_used, r->internal.db_points_read,
+ r->internal.result_points_generated, qt->request.query_source);
return r;
}
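
The query.c rework above funnels everything through QUERY_TARGET: callers describe the query in a QUERY_TARGET_REQUEST, query_target_create() resolves it, query_target_calculate_window() fills qt->window, and rrd2rrdr() only consumes the prepared target. A minimal caller sketch against the rrd2rrdr_legacy() signature introduced in this hunk; the allocator setup, the chart lookup and the QUERY_SOURCE_API_DATA member are assumptions about surrounding code, not part of this diff:

    // hypothetical caller of the compatibility wrapper shown above
    ONEWAYALLOC *owa = onewayalloc_create(0);            // assumed allocator helper
    RRDSET *st = rrdset_find(localhost, "system.cpu");   // assumed chart lookup
    RRDR *r = rrd2rrdr_legacy(owa, st,
                              600,                       // points
                              -600, 0,                   // after, before (relative window)
                              RRDR_GROUPING_AVERAGE,
                              0,                         // resampling_time
                              RRDR_OPTION_NATURAL_POINTS,
                              NULL, NULL,                // dimensions, group_options
                              0, 0,                      // timeout, tier
                              QUERY_SOURCE_API_DATA);    // assumed QUERY_SOURCE member
    if(r) rrdr_free(owa, r);                             // also releases the QUERY_TARGET
    onewayalloc_destroy(owa);
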
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
index df876d9ac..ebad5a1f8 100644
--- a/web/api/queries/query.h
+++ b/web/api/queries/query.h
@@ -47,10 +47,10 @@ typedef enum rrdr_grouping {
RRDR_GROUPING_COUNTIF,
} RRDR_GROUPING;
-extern const char *group_method2string(RRDR_GROUPING group);
-extern void web_client_api_v1_init_grouping(void);
-extern RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);
-extern const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group);
+const char *group_method2string(RRDR_GROUPING group);
+void web_client_api_v1_init_grouping(void);
+RRDR_GROUPING web_client_api_request_v1_data_group(const char *name, RRDR_GROUPING def);
+const char *web_client_api_request_v1_data_group_to_string(RRDR_GROUPING group);
#ifdef __cplusplus
}
diff --git a/web/api/queries/rrdr.c b/web/api/queries/rrdr.c
index ecf4ca2ac..676224c9d 100644
--- a/web/api/queries/rrdr.c
+++ b/web/api/queries/rrdr.c
@@ -61,9 +61,7 @@ static void rrdr_dump(RRDR *r)
inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r) {
if(unlikely(!r)) return;
- if(likely(r->st_locked_by_rrdr_create))
- rrdset_unlock(r->st);
-
+ query_target_release(r->internal.qt);
onewayalloc_freez(owa, r->t);
onewayalloc_freez(owa, r->v);
onewayalloc_freez(owa, r->o);
@@ -72,12 +70,23 @@ inline void rrdr_free(ONEWAYALLOC *owa, RRDR *r) {
onewayalloc_freez(owa, r);
}
-RRDR *rrdr_create_for_x_dimensions(ONEWAYALLOC *owa, int dimensions, long points) {
+RRDR *rrdr_create(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
+ if(unlikely(!qt || !qt->query.used || !qt->window.points))
+ return NULL;
+
+ size_t dimensions = qt->query.used;
+ size_t points = qt->window.points;
+
+ // create the rrdr
RRDR *r = onewayalloc_callocz(owa, 1, sizeof(RRDR));
r->internal.owa = owa;
+ r->internal.qt = qt;
- r->d = dimensions;
- r->n = points;
+ r->before = qt->window.before;
+ r->after = qt->window.after;
+ r->internal.points_wanted = qt->window.points;
+ r->d = (int)dimensions;
+ r->n = (int)points;
r->t = onewayalloc_callocz(owa, points, sizeof(time_t));
r->v = onewayalloc_mallocz(owa, points * dimensions * sizeof(NETDATA_DOUBLE));
@@ -90,42 +99,3 @@ RRDR *rrdr_create_for_x_dimensions(ONEWAYALLOC *owa, int dimensions, long points
return r;
}
-
-RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list) {
- if (unlikely(!st)) return NULL;
-
- bool st_locked_by_rrdr_create = false;
- if (!context_param_list || !(context_param_list->flags & CONTEXT_FLAGS_ARCHIVE)) {
- rrdset_rdlock(st);
- st_locked_by_rrdr_create = true;
- }
-
- // count the number of dimensions
- int dimensions = 0;
- RRDDIM *temp_rd = context_param_list ? context_param_list->rd : NULL;
- RRDDIM *rd;
- if (temp_rd) {
- RRDDIM *t = temp_rd;
- while (t) {
- dimensions++;
- t = t->next;
- }
- } else
- rrddim_foreach_read(rd, st) dimensions++;
-
- // create the rrdr
- RRDR *r = rrdr_create_for_x_dimensions(owa, dimensions, n);
- r->st = st;
- r->st_locked_by_rrdr_create = st_locked_by_rrdr_create;
-
- // set the hidden flag on hidden dimensions
- int c;
- for (c = 0, rd = temp_rd ? temp_rd : st->dimensions; rd; c++, rd = rd->next) {
- if (unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_HIDDEN)))
- r->od[c] = RRDR_DIMENSION_HIDDEN;
- else
- r->od[c] = RRDR_DIMENSION_DEFAULT;
- }
-
- return r;
-}
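
With the legacy constructor above removed, rrdr_create() sizes the result purely from the QUERY_TARGET: qt->query.used dimensions by qt->window.points rows, with r->v and r->o allocated as flat n x d arrays. A consumer-side sketch of walking such a result; the row-major indexing follows from the allocations above, while the flag checks and output formatting are illustrative assumptions:

    // illustrative only: walk an RRDR, assuming the conventional row-major n x d layout
    static void rrdr_dump_selected(RRDR *r) {
        for(size_t i = 0; i < rrdr_rows(r) ; i++) {
            time_t t = r->t[i];                              // timestamp of this row
            NETDATA_DOUBLE *row = &r->v[ i * r->d ];         // the d values of this row
            RRDR_VALUE_FLAGS *vo = &r->o[ i * r->d ];        // the per-value options of this row
            for(size_t c = 0; c < r->d ; c++) {
                if(!(r->od[c] & RRDR_DIMENSION_SELECTED)) continue;
                if(vo[c] & RRDR_VALUE_EMPTY) continue;       // gap in this dimension
                printf("%ld %zu " NETDATA_DOUBLE_FORMAT_AUTO "\n", (long)t, c, row[c]);
            }
        }
    }
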
diff --git a/web/api/queries/rrdr.h b/web/api/queries/rrdr.h
index 1c80e103f..6151cddc7 100644
--- a/web/api/queries/rrdr.h
+++ b/web/api/queries/rrdr.h
@@ -18,32 +18,33 @@ typedef enum tier_query_fetch {
} TIER_QUERY_FETCH;
typedef enum rrdr_options {
- RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
- RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
- RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
- RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
- RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
- RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
- RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
- RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
- RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
- RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
- RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
- RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
- RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
- RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
- RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
- RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
- RRDR_OPTION_CUSTOM_VARS = 0x00010000, // when wrapping response in a JSON, return custom variables in response
- RRDR_OPTION_NATURAL_POINTS = 0x00020000, // return the natural points of the database
- RRDR_OPTION_VIRTUAL_POINTS = 0x00040000, // return virtual points
- RRDR_OPTION_ANOMALY_BIT = 0x00080000, // Return the anomaly bit stored in each collected_number
- RRDR_OPTION_RETURN_RAW = 0x00100000, // Return raw data for aggregating across multiple nodes
- RRDR_OPTION_RETURN_JWAR = 0x00200000, // Return anomaly rates in jsonwrap
- RRDR_OPTION_SELECTED_TIER = 0x00400000, // Use the selected tier for the query
+ RRDR_OPTION_NONZERO = 0x00000001, // don't output dimensions with just zero values
+ RRDR_OPTION_REVERSED = 0x00000002, // output the rows in reverse order (oldest to newest)
+ RRDR_OPTION_ABSOLUTE = 0x00000004, // values positive, for DATASOURCE_SSV before summing
+ RRDR_OPTION_MIN2MAX = 0x00000008, // when adding dimensions, use max - min, instead of sum
+ RRDR_OPTION_SECONDS = 0x00000010, // output seconds, instead of dates
+ RRDR_OPTION_MILLISECONDS = 0x00000020, // output milliseconds, instead of dates
+ RRDR_OPTION_NULL2ZERO = 0x00000040, // do not show nulls, convert them to zeros
+ RRDR_OPTION_OBJECTSROWS = 0x00000080, // each row of values should be an object, not an array
+ RRDR_OPTION_GOOGLE_JSON = 0x00000100, // comply with google JSON/JSONP specs
+ RRDR_OPTION_JSON_WRAP = 0x00000200, // wrap the response in a JSON header with info about the result
+ RRDR_OPTION_LABEL_QUOTES = 0x00000400, // in CSV output, wrap header labels in double quotes
+ RRDR_OPTION_PERCENTAGE = 0x00000800, // give values as percentage of total
+ RRDR_OPTION_NOT_ALIGNED = 0x00001000, // do not align charts for persistent timeframes
+ RRDR_OPTION_DISPLAY_ABS = 0x00002000, // for badges, display the absolute value, but calculate colors with sign
+ RRDR_OPTION_MATCH_IDS = 0x00004000, // when filtering dimensions, match only IDs
+ RRDR_OPTION_MATCH_NAMES = 0x00008000, // when filtering dimensions, match only names
+ RRDR_OPTION_NATURAL_POINTS = 0x00020000, // return the natural points of the database
+ RRDR_OPTION_VIRTUAL_POINTS = 0x00040000, // return virtual points
+ RRDR_OPTION_ANOMALY_BIT = 0x00080000, // Return the anomaly bit stored in each collected_number
+ RRDR_OPTION_RETURN_RAW = 0x00100000, // Return raw data for aggregating across multiple nodes
+ RRDR_OPTION_RETURN_JWAR = 0x00200000, // Return anomaly rates in jsonwrap
+ RRDR_OPTION_SELECTED_TIER = 0x00400000, // Use the selected tier for the query
+ RRDR_OPTION_ALL_DIMENSIONS = 0x00800000, // Return the full dimensions list
// internal ones - not to be exposed to the API
- RRDR_OPTION_INTERNAL_AR = 0x10000000, // internal use only, to let the formatters we want to render the anomaly rate
+ RRDR_OPTION_INTERNAL_AR = 0x10000000, // internal use only, to let the formatters we want to render the anomaly rate
+ RRDR_OPTION_HEALTH_RSRVD1 = 0x80000000, // reserved for RRDCALC_OPTION_NO_CLEAR_NOTIFICATION
} RRDR_OPTIONS;
typedef enum rrdr_value_flag {
@@ -67,16 +68,14 @@ typedef enum rrdr_result_flags {
// (should not to be cached by browsers and proxies)
RRDR_RESULT_OPTION_VARIABLE_STEP = 0x00000004, // the query uses variable-step time-frames
RRDR_RESULT_OPTION_CANCEL = 0x00000008, // the query needs to be cancelled
-} RRDR_RESULT_FLAGS;
+} RRDR_RESULT_OPTIONS;
typedef struct rrdresult {
- struct rrdset *st; // the chart this result refers to
+ RRDR_RESULT_OPTIONS result_options; // RRDR_RESULT_OPTION_*
- RRDR_RESULT_FLAGS result_options; // RRDR_RESULT_OPTION_*
-
- int d; // the number of dimensions
- long n; // the number of values in the arrays
- long rows; // the number of rows used
+ size_t d; // the number of dimensions
+ size_t n; // the number of values in the arrays
+ size_t rows; // the number of rows used
RRDR_DIMENSION_FLAGS *od; // the options for the dimensions
@@ -85,8 +84,8 @@ typedef struct rrdresult {
RRDR_VALUE_FLAGS *o; // array n x d options for each value returned
NETDATA_DOUBLE *ar; // array n x d of anomaly rates (0 - 100)
- long group; // how many collected values were grouped for each row
- int update_every; // what is the suggested update frequency in seconds
+ size_t group; // how many collected values were grouped for each row
+ time_t update_every; // what is the suggested update frequency in seconds
NETDATA_DOUBLE min;
NETDATA_DOUBLE max;
@@ -94,53 +93,57 @@ typedef struct rrdresult {
time_t before;
time_t after;
- bool st_locked_by_rrdr_create; // if st is read locked by us
-
// internal rrd2rrdr() members below this point
struct {
- int query_tier; // the selected tier
- RRDR_OPTIONS query_options; // RRDR_OPTION_* (as run by the query)
+ ONEWAYALLOC *owa; // the allocator used
+ struct query_target *qt; // the QUERY_TARGET
+
+ RRDR_OPTIONS query_options; // RRDR_OPTION_* (as run by the query)
- long points_wanted;
- long resampling_group;
- NETDATA_DOUBLE resampling_divisor;
+ size_t points_wanted; // used by SES and DES
+ size_t resampling_group; // used by AVERAGE
+ NETDATA_DOUBLE resampling_divisor; // used by AVERAGE
+ // grouping function pointers
void (*grouping_create)(struct rrdresult *r, const char *options);
void (*grouping_reset)(struct rrdresult *r);
void (*grouping_free)(struct rrdresult *r);
void (*grouping_add)(struct rrdresult *r, NETDATA_DOUBLE value);
NETDATA_DOUBLE (*grouping_flush)(struct rrdresult *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
- void *grouping_data;
- TIER_QUERY_FETCH tier_query_fetch;
- #ifdef NETDATA_INTERNAL_CHECKS
+ TIER_QUERY_FETCH tier_query_fetch; // which value to use from STORAGE_POINT
+ void *grouping_data; // the internal data of the grouping function
+
+#ifdef NETDATA_INTERNAL_CHECKS
const char *log;
- #endif
+#endif
+ // statistics
size_t db_points_read;
size_t result_points_generated;
size_t tier_points_read[RRD_STORAGE_TIERS];
- ONEWAYALLOC *owa;
} internal;
} RRDR;
#define rrdr_rows(r) ((r)->rows)
#include "database/rrd.h"
-extern void rrdr_free(ONEWAYALLOC *owa, RRDR *r);
-extern RRDR *rrdr_create(ONEWAYALLOC *owa, struct rrdset *st, long n, struct context_param *context_param_list);
-extern RRDR *rrdr_create_for_x_dimensions(ONEWAYALLOC *owa, int dimensions, long points);
+void rrdr_free(ONEWAYALLOC *owa, RRDR *r);
+RRDR *rrdr_create(ONEWAYALLOC *owa, struct query_target *qt);
#include "../web_api_v1.h"
#include "web/api/queries/query.h"
-extern RRDR *rrd2rrdr(
- ONEWAYALLOC *owa,
- RRDSET *st, long points_wanted, long long after_wanted, long long before_wanted,
- RRDR_GROUPING group_method, long resampling_time_requested, RRDR_OPTIONS options, const char *dimensions,
- struct context_param *context_param_list, const char *group_options, int timeout, int tier);
+RRDR *rrd2rrdr_legacy(
+ ONEWAYALLOC *owa,
+ RRDSET *st, size_t points, time_t after, time_t before,
+ RRDR_GROUPING group_method, time_t resampling_time, RRDR_OPTIONS options, const char *dimensions,
+ const char *group_options, time_t timeout, size_t tier, QUERY_SOURCE query_source);
+
+RRDR *rrd2rrdr(ONEWAYALLOC *owa, struct query_target *qt);
+bool query_target_calculate_window(struct query_target *qt);
-extern int rrdr_relative_window_to_absolute(long long *after, long long *before);
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before);
#ifdef __cplusplus
}
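
The re-aligned RRDR_OPTIONS table above also changes the flag set: 0x00010000 (formerly RRDR_OPTION_CUSTOM_VARS) is no longer assigned, RRDR_OPTION_ALL_DIMENSIONS is new, and 0x80000000 is reserved for health. The options remain independent bits that are OR-ed together and tested with '&'; a trivial sketch, not taken from this diff:

    // illustrative combination of option bits for a JSON-wrapped, natural-points query
    RRDR_OPTIONS options = RRDR_OPTION_JSON_WRAP
                         | RRDR_OPTION_NONZERO
                         | RRDR_OPTION_NATURAL_POINTS;

    if(options & RRDR_OPTION_NONZERO) {
        // formatters will suppress dimensions whose values are all zero,
        // unless the query marks them RRDR_DIMENSION_NONZERO anyway
    }
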
diff --git a/web/api/queries/ses/ses.h b/web/api/queries/ses/ses.h
index 094b8de3f..79b09fbdf 100644
--- a/web/api/queries/ses/ses.h
+++ b/web/api/queries/ses/ses.h
@@ -6,12 +6,12 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_init_ses(void);
+void grouping_init_ses(void);
-extern void grouping_create_ses(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_ses(RRDR *r);
-extern void grouping_free_ses(RRDR *r);
-extern void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_ses(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_ses(RRDR *r);
+void grouping_free_ses(RRDR *r);
+void grouping_add_ses(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_ses(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_SES_H
diff --git a/web/api/queries/stddev/stddev.h b/web/api/queries/stddev/stddev.h
index c5c91f88d..4b8ffcd53 100644
--- a/web/api/queries/stddev/stddev.h
+++ b/web/api/queries/stddev/stddev.h
@@ -6,13 +6,13 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_stddev(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_stddev(RRDR *r);
-extern void grouping_free_stddev(RRDR *r);
-extern void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-extern NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// extern NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
-// extern NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_stddev(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_stddev(RRDR *r);
+void grouping_free_stddev(RRDR *r);
+void grouping_add_stddev(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_stddev(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+NETDATA_DOUBLE grouping_flush_coefficient_of_variation(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// NETDATA_DOUBLE grouping_flush_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+// NETDATA_DOUBLE grouping_flush_variance(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_STDDEV_H
diff --git a/web/api/queries/sum/sum.h b/web/api/queries/sum/sum.h
index 4e7e396e9..898782775 100644
--- a/web/api/queries/sum/sum.h
+++ b/web/api/queries/sum/sum.h
@@ -6,10 +6,10 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_sum(RRDR *r, const char *options __maybe_unused);
-extern void grouping_reset_sum(RRDR *r);
-extern void grouping_free_sum(RRDR *r);
-extern void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_sum(RRDR *r, const char *options __maybe_unused);
+void grouping_reset_sum(RRDR *r);
+void grouping_free_sum(RRDR *r);
+void grouping_add_sum(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_sum(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERY_SUM_H
diff --git a/web/api/queries/trimmed_mean/trimmed_mean.h b/web/api/queries/trimmed_mean/trimmed_mean.h
index 1a4f63e9c..e66d92541 100644
--- a/web/api/queries/trimmed_mean/trimmed_mean.h
+++ b/web/api/queries/trimmed_mean/trimmed_mean.h
@@ -6,17 +6,17 @@
#include "../query.h"
#include "../rrdr.h"
-extern void grouping_create_trimmed_mean1(RRDR *r, const char *options);
-extern void grouping_create_trimmed_mean2(RRDR *r, const char *options);
-extern void grouping_create_trimmed_mean3(RRDR *r, const char *options);
-extern void grouping_create_trimmed_mean5(RRDR *r, const char *options);
-extern void grouping_create_trimmed_mean10(RRDR *r, const char *options);
-extern void grouping_create_trimmed_mean15(RRDR *r, const char *options);
-extern void grouping_create_trimmed_mean20(RRDR *r, const char *options);
-extern void grouping_create_trimmed_mean25(RRDR *r, const char *options);
-extern void grouping_reset_trimmed_mean(RRDR *r);
-extern void grouping_free_trimmed_mean(RRDR *r);
-extern void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value);
-extern NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
+void grouping_create_trimmed_mean1(RRDR *r, const char *options);
+void grouping_create_trimmed_mean2(RRDR *r, const char *options);
+void grouping_create_trimmed_mean3(RRDR *r, const char *options);
+void grouping_create_trimmed_mean5(RRDR *r, const char *options);
+void grouping_create_trimmed_mean10(RRDR *r, const char *options);
+void grouping_create_trimmed_mean15(RRDR *r, const char *options);
+void grouping_create_trimmed_mean20(RRDR *r, const char *options);
+void grouping_create_trimmed_mean25(RRDR *r, const char *options);
+void grouping_reset_trimmed_mean(RRDR *r);
+void grouping_free_trimmed_mean(RRDR *r);
+void grouping_add_trimmed_mean(RRDR *r, NETDATA_DOUBLE value);
+NETDATA_DOUBLE grouping_flush_trimmed_mean(RRDR *r, RRDR_VALUE_FLAGS *rrdr_value_options_ptr);
#endif //NETDATA_API_QUERIES_TRIMMED_MEAN_H
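
The weights.c hunks below drop the string-duplicating insert/delete callbacks: struct register_result now keeps acquired context/instance/metric handles, and each entry is keyed by the textual RRDMETRIC_ACQUIRED pointer, which is unique per metric. A condensed sketch of the new registration path, mirroring the hunks that follow (variable names are placeholders):

    // sketch of the new keying scheme used by register_result() below
    struct register_result t = { .flags = flags, .rca = rca, .ria = ria, .rma = rma, .value = v };
    char key[20 + 1];
    ssize_t len = snprintfz(key, 20, "%p", rma);       // the acquired-metric pointer as the key
    dictionary_set_advanced(results, key, len + 1, &t, sizeof(struct register_result), NULL);
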
diff --git a/web/api/queries/weights.c b/web/api/queries/weights.c
index 97a00f91c..a9555a66b 100644
--- a/web/api/queries/weights.c
+++ b/web/api/queries/weights.c
@@ -56,40 +56,14 @@ typedef enum {
struct register_result {
RESULT_FLAGS flags;
- RRDSET *st;
- const char *chart_id;
- const char *context;
- const char *dim_name;
+ RRDCONTEXT_ACQUIRED *rca;
+ RRDINSTANCE_ACQUIRED *ria;
+ RRDMETRIC_ACQUIRED *rma;
NETDATA_DOUBLE value;
-
- struct register_result *next; // used to link contexts together
};
-static void register_result_insert_callback(const char *name, void *value, void *data) {
- (void)name;
- (void)data;
-
- struct register_result *t = (struct register_result *)value;
-
- if(t->chart_id) t->chart_id = strdupz(t->chart_id);
- if(t->context) t->context = strdupz(t->context);
- if(t->dim_name) t->dim_name = strdupz(t->dim_name);
-}
-
-static void register_result_delete_callback(const char *name, void *value, void *data) {
- (void)name;
- (void)data;
- struct register_result *t = (struct register_result *)value;
-
- freez((void *)t->chart_id);
- freez((void *)t->context);
- freez((void *)t->dim_name);
-}
-
static DICTIONARY *register_result_init() {
- DICTIONARY *results = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED);
- dictionary_register_insert_callback(results, register_result_insert_callback, results);
- dictionary_register_delete_callback(results, register_result_delete_callback, results);
+ DICTIONARY *results = dictionary_create(DICT_OPTION_SINGLE_THREADED);
return results;
}
@@ -98,8 +72,9 @@ static void register_result_destroy(DICTIONARY *results) {
}
static void register_result(DICTIONARY *results,
- RRDSET *st,
- RRDDIM *d,
+ RRDCONTEXT_ACQUIRED *rca,
+ RRDINSTANCE_ACQUIRED *ria,
+ RRDMETRIC_ACQUIRED *rma,
NETDATA_DOUBLE value,
RESULT_FLAGS flags,
WEIGHTS_STATS *stats,
@@ -120,25 +95,25 @@ static void register_result(DICTIONARY *results,
struct register_result t = {
.flags = flags,
- .st = st,
- .chart_id = st->id,
- .context = st->context,
- .dim_name = d->name,
+ .rca = rca,
+ .ria = ria,
+ .rma = rma,
.value = v
};
- char buf[5000 + 1];
- snprintfz(buf, 5000, "%s:%s", st->id, d->name);
- dictionary_set(results, buf, &t, sizeof(struct register_result));
+ // we can use the pointer address or RMA as a unique key for each metric
+ char buf[20 + 1];
+ ssize_t len = snprintfz(buf, 20, "%p", rma);
+ dictionary_set_advanced(results, buf, len + 1, &t, sizeof(struct register_result), NULL);
}
// ----------------------------------------------------------------------------
// Generation of JSON output for the results
static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *wb,
- long long after, long long before,
- long long baseline_after, long long baseline_before,
- long points, WEIGHTS_METHOD method,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions __maybe_unused, usec_t duration,
WEIGHTS_STATS *stats) {
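The register_result() change above keys the results dictionary on the printed pointer address of the acquired metric, rather than on a duplicated "chart:dimension" string, so the insert/delete callbacks that copied and freed those strings are no longer needed. A minimal sketch of the idea, using plain snprintf (the netdata helpers snprintfz and dictionary_set_advanced are used exactly as shown in the diff):

    #include <stdio.h>

    /* the acquired pointer is unique and stable for as long as it is held,
       so its textual form makes a compact dictionary key */
    static int pointer_key(char *buf, size_t size, const void *ptr) {
        return snprintf(buf, size, "%p", ptr);
    }

    /* usage sketch:
       char key[24];
       int len = pointer_key(key, sizeof(key), rma);
       dictionary_set_advanced(results, key, len + 1, &t, sizeof(t), NULL); */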
@@ -147,10 +122,10 @@ static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *w
"\t\"after\": %lld,\n"
"\t\"before\": %lld,\n"
"\t\"duration\": %lld,\n"
- "\t\"points\": %ld,\n",
- after,
- before,
- before - after,
+ "\t\"points\": %zu,\n",
+ (long long)after,
+ (long long)before,
+ (long long)(before - after),
points
);
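The casts above are needed because time_t has no portable printf conversion: its width differs between platforms, so values are widened to long long and printed with %lld, while the point counts are size_t and use %zu. A minimal illustration:

    #include <stdio.h>
    #include <time.h>

    static void print_window(time_t after, time_t before, size_t points) {
        /* time_t may be 32 or 64 bits wide; widen explicitly before using %lld */
        printf("after=%lld before=%lld duration=%lld points=%zu\n",
               (long long)after, (long long)before,
               (long long)(before - after), points);
    }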
@@ -159,10 +134,10 @@ static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *w
"\t\"baseline_after\": %lld,\n"
"\t\"baseline_before\": %lld,\n"
"\t\"baseline_duration\": %lld,\n"
- "\t\"baseline_points\": %ld,\n",
- baseline_after,
- baseline_before,
- baseline_before - baseline_after,
+ "\t\"baseline_points\": %zu,\n",
+ (long long)baseline_after,
+ (long long)baseline_before,
+ (long long)(baseline_before - baseline_after),
points << shifts
);
@@ -181,7 +156,7 @@ static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *w
stats->db_points
);
- for(int tier = 0; tier < storage_tiers ;tier++)
+ for(size_t tier = 0; tier < storage_tiers ;tier++)
buffer_sprintf(wb, "%s%zu", tier?", ":"", stats->db_points_per_tier[tier]);
buffer_sprintf(wb, " ]\n"
@@ -193,13 +168,13 @@ static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *w
weights_method_to_string(method)
);
- web_client_api_request_v1_data_options_to_string(wb, options);
+ web_client_api_request_v1_data_options_to_buffer(wb, options);
}
static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
- long long after, long long before,
- long long baseline_after, long long baseline_before,
- long points, WEIGHTS_METHOD method,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions, usec_t duration,
WEIGHTS_STATS *stats) {
@@ -211,23 +186,23 @@ static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
size_t charts = 0, chart_dims = 0, total_dimensions = 0;
struct register_result *t;
- RRDSET *last_st = NULL; // never access this - we use it only for comparison
+ RRDINSTANCE_ACQUIRED *last_ria = NULL; // never access this - we use it only for comparison
dfe_start_read(results, t) {
- if(!last_st || t->st != last_st) {
- last_st = t->st;
+ if(t->ria != last_ria) {
+ last_ria = t->ria;
if(charts) buffer_strcat(wb, "\n\t\t\t}\n\t\t},\n");
buffer_strcat(wb, "\t\t\"");
- buffer_strcat(wb, t->chart_id);
+ buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
buffer_strcat(wb, "\": {\n");
buffer_strcat(wb, "\t\t\t\"context\": \"");
- buffer_strcat(wb, t->context);
+ buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
buffer_strcat(wb, "\",\n\t\t\t\"dimensions\": {\n");
charts++;
chart_dims = 0;
}
if (chart_dims) buffer_sprintf(wb, ",\n");
- buffer_sprintf(wb, "\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, t->dim_name, t->value);
+ buffer_sprintf(wb, "\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
chart_dims++;
total_dimensions++;
}
@@ -250,9 +225,9 @@ static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
}
static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *wb,
- long long after, long long before,
- long long baseline_after, long long baseline_before,
- long points, WEIGHTS_METHOD method,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions, usec_t duration,
WEIGHTS_STATS *stats) {
@@ -260,78 +235,80 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
points, method, group, options, shifts, examined_dimensions, duration, stats);
- DICTIONARY *context_results = dictionary_create(
- DICTIONARY_FLAG_SINGLE_THREADED
- |DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE
- |DICTIONARY_FLAG_NAME_LINK_DONT_CLONE
- |DICTIONARY_FLAG_DONT_OVERWRITE_VALUE
- );
+ buffer_strcat(wb, "\",\n\t\"contexts\": {\n");
+ size_t contexts = 0, charts = 0, total_dimensions = 0, context_dims = 0, chart_dims = 0;
+ NETDATA_DOUBLE contexts_total_weight = 0.0, charts_total_weight = 0.0;
struct register_result *t;
+ RRDCONTEXT_ACQUIRED *last_rca = NULL;
+ RRDINSTANCE_ACQUIRED *last_ria = NULL;
dfe_start_read(results, t) {
- struct register_result *tc = dictionary_set(context_results, t->context, t, sizeof(*t));
- if(tc == t)
- t->next = NULL;
- else {
- t->next = tc->next;
- tc->next = t;
+
+ if(t->rca != last_rca) {
+ last_rca = t->rca;
+
+ if(contexts)
+ buffer_sprintf(wb, "\n"
+ "\t\t\t\t\t},\n"
+ "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t\t\t}\n\t\t\t},\n"
+ "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t},\n"
+ , charts_total_weight / (double)chart_dims
+ , contexts_total_weight / (double)context_dims);
+
+ buffer_strcat(wb, "\t\t\"");
+ buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
+ buffer_strcat(wb, "\": {\n\t\t\t\"charts\":{\n");
+
+ contexts++;
+ charts = 0;
+ context_dims = 0;
+ contexts_total_weight = 0.0;
+
+ last_ria = NULL;
}
- }
- dfe_done(t);
- buffer_strcat(wb, "\",\n\t\"contexts\": {\n");
+ if(t->ria != last_ria) {
+ last_ria = t->ria;
- size_t contexts = 0, total_dimensions = 0, charts = 0, context_dims = 0, chart_dims = 0;
- NETDATA_DOUBLE contexts_total_weight = 0.0, charts_total_weight = 0.0;
- RRDSET *last_st = NULL; // never access this - we use it only for comparison
- dfe_start_read(context_results, t) {
-
- if(contexts)
- buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t},\n", charts_total_weight / chart_dims, contexts_total_weight / context_dims);
-
- contexts++;
- context_dims = 0;
- contexts_total_weight = 0.0;
-
- buffer_strcat(wb, "\t\t\"");
- buffer_strcat(wb, t->context);
- buffer_strcat(wb, "\": {\n\t\t\t\"charts\":{\n");
-
- charts = 0;
- chart_dims = 0;
- struct register_result *tt;
- for(tt = t; tt ; tt = tt->next) {
- if(!last_st || tt->st != last_st) {
- last_st = tt->st;
-
- if(charts)
- buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t},\n", charts_total_weight / chart_dims);
-
- buffer_strcat(wb, "\t\t\t\t\"");
- buffer_strcat(wb, tt->chart_id);
- buffer_strcat(wb, "\": {\n");
- buffer_strcat(wb, "\t\t\t\t\t\"dimensions\": {\n");
- charts++;
- chart_dims = 0;
- charts_total_weight = 0.0;
- }
-
- if (chart_dims) buffer_sprintf(wb, ",\n");
- buffer_sprintf(wb, "\t\t\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, tt->dim_name, tt->value);
- charts_total_weight += tt->value;
- contexts_total_weight += tt->value;
- chart_dims++;
- context_dims++;
- total_dimensions++;
+ if(charts)
+ buffer_sprintf(wb, "\n"
+ "\t\t\t\t\t},\n"
+ "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t\t\t},\n"
+ , charts_total_weight / (double)chart_dims);
+
+ buffer_strcat(wb, "\t\t\t\t\"");
+ buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
+ buffer_strcat(wb, "\": {\n");
+ buffer_strcat(wb, "\t\t\t\t\t\"dimensions\": {\n");
+
+ charts++;
+ chart_dims = 0;
+ charts_total_weight = 0.0;
}
+
+ if (chart_dims) buffer_sprintf(wb, ",\n");
+ buffer_sprintf(wb, "\t\t\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
+ charts_total_weight += t->value;
+ contexts_total_weight += t->value;
+ chart_dims++;
+ context_dims++;
+ total_dimensions++;
}
dfe_done(t);
- dictionary_destroy(context_results);
-
// close dimensions and chart
if (total_dimensions)
- buffer_sprintf(wb, "\n\t\t\t\t\t},\n\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t}\n", charts_total_weight / chart_dims, contexts_total_weight / context_dims);
+ buffer_sprintf(wb, "\n"
+ "\t\t\t\t\t},\n"
+ "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t\t\t}\n"
+ "\t\t\t},\n"
+ "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
+ "\t\t}\n"
+ , charts_total_weight / (double)chart_dims
+ , contexts_total_weight / (double)context_dims);
// close correlated_charts
buffer_sprintf(wb, "\t},\n"
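In the output built above, each chart's "weight" is the average of its dimensions' weights (charts_total_weight / chart_dims) and each context's "weight" is the average over all dimensions of all its charts (contexts_total_weight / context_dims). A worked example with hypothetical scores:

    chart A dimensions: 0.2, 0.4      ->  chart weight   (0.2 + 0.4) / 2 = 0.3
    chart B dimension : 0.6           ->  chart weight    0.6       / 1 = 0.6
    context (A and B) : three values  ->  context weight (0.2 + 0.4 + 0.6) / 3 = 0.4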
@@ -391,7 +368,10 @@ static size_t calculate_pairs_diff(DIFFS_NUMBERS *diffs, NETDATA_DOUBLE *arr, si
return added;
}
-static double ks_2samp(DIFFS_NUMBERS baseline_diffs[], int base_size, DIFFS_NUMBERS highlight_diffs[], int high_size, uint32_t base_shifts) {
+static double ks_2samp(
+ DIFFS_NUMBERS baseline_diffs[], int base_size,
+ DIFFS_NUMBERS highlight_diffs[], int high_size,
+ uint32_t base_shifts) {
qsort(baseline_diffs, base_size, sizeof(DIFFS_NUMBERS), compare_diffs);
qsort(highlight_diffs, high_size, sizeof(DIFFS_NUMBERS), compare_diffs);
@@ -414,7 +394,7 @@ static double ks_2samp(DIFFS_NUMBERS baseline_diffs[], int base_size, DIFFS_NUMB
// This would require a lot of multiplications and divisions.
//
// To speed it up, we do the binary search to find the index of each number
- // but then we divide the base index by the power of two number (shifts) it
+ // but, then we divide the base index by the power of two number (shifts) it
// is bigger than high index. So the 2 indexes are now comparable.
// We also keep track of the original indexes with min and max, to properly
// calculate their percentages once the loops finish.
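A minimal sketch of the indexing trick described above (illustrative only, not the netdata implementation): assuming both difference arrays are sorted ascending and base_size == high_size << shifts, shifting the baseline index right by shifts puts both positions on the same scale, so the largest drift between the two empirical distributions can be tracked without any division inside the loop.

    /* approximate two-sample KS distance using the shift trick; inputs sorted ascending */
    static double ks_distance_sketch(const double *base, int base_size,
                                     const double *high, int high_size,
                                     unsigned int shifts) {
        int bi = 0, hi = 0, max_diff = 0;

        while(bi < base_size && hi < high_size) {
            if(base[bi] <= high[hi]) bi++;
            else hi++;

            /* bi >> shifts equals bi / (1 << shifts), so both indexes are on the highlight scale */
            int diff = (bi >> shifts) - hi;
            if(diff < 0) diff = -diff;
            if(diff > max_diff) max_diff = diff;
        }

        return (double)max_diff / (double)high_size; /* normalized to 0.0 - 1.0 */
    }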
@@ -495,7 +475,9 @@ static double ks_2samp(DIFFS_NUMBERS baseline_diffs[], int base_size, DIFFS_NUMB
static double kstwo(
NETDATA_DOUBLE baseline[], int baseline_points,
- NETDATA_DOUBLE highlight[], int highlight_points, uint32_t base_shifts) {
+ NETDATA_DOUBLE highlight[], int highlight_points,
+ uint32_t base_shifts) {
+
// -1 in size, since the calculate_pairs_diffs() returns one less point
DIFFS_NUMBERS baseline_diffs[baseline_points - 1];
DIFFS_NUMBERS highlight_diffs[highlight_points - 1];
@@ -514,308 +496,215 @@ static double kstwo(
return ks_2samp(baseline_diffs, base_size, highlight_diffs, high_size, base_shifts);
}
+NETDATA_DOUBLE *rrd2rrdr_ks2(
+ ONEWAYALLOC *owa, RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ time_t after, time_t before, size_t points, RRDR_OPTIONS options,
+ RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ WEIGHTS_STATS *stats,
+ size_t *entries
+ ) {
+
+ NETDATA_DOUBLE *ret = NULL;
+
+ QUERY_TARGET_REQUEST qtr = {
+ .host = host,
+ .rca = rca,
+ .ria = ria,
+ .rma = rma,
+ .after = after,
+ .before = before,
+ .points = points,
+ .options = options,
+ .group_method = group_method,
+ .group_options = group_options,
+ .tier = tier,
+ .query_source = QUERY_SOURCE_API_WEIGHTS,
+ };
-static int rrdset_metric_correlations_ks2(RRDSET *st, DICTIONARY *results,
- long long baseline_after, long long baseline_before,
- long long after, long long before,
- long long points, RRDR_OPTIONS options,
- RRDR_GROUPING group, const char *group_options, int tier,
- uint32_t shifts, int timeout,
- WEIGHTS_STATS *stats, bool register_zero) {
- options |= RRDR_OPTION_NATURAL_POINTS;
-
- long group_time = 0;
- struct context_param *context_param_list = NULL;
-
- int examined_dimensions = 0;
-
- RRDR *high_rrdr = NULL;
- RRDR *base_rrdr = NULL;
-
- // get first the highlight to find the number of points available
- stats->db_queries++;
- usec_t started_usec = now_realtime_usec();
- ONEWAYALLOC *owa = onewayalloc_create(0);
- high_rrdr = rrd2rrdr(owa, st, points,
- after, before, group,
- group_time, options, NULL, context_param_list, group_options,
- timeout, tier);
- if(!high_rrdr) {
- info("Metric correlations: rrd2rrdr() failed for the highlighted window on chart '%s'.", st->name);
+ RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+ if(!r)
goto cleanup;
- }
- for(int i = 0; i < storage_tiers ;i++)
- stats->db_points_per_tier[i] += high_rrdr->internal.tier_points_read[i];
+ stats->db_queries++;
+ stats->result_points += r->internal.result_points_generated;
+ stats->db_points += r->internal.db_points_read;
+ for(size_t tr = 0; tr < storage_tiers ; tr++)
+ stats->db_points_per_tier[tr] += r->internal.tier_points_read[tr];
- stats->db_points += high_rrdr->internal.db_points_read;
- stats->result_points += high_rrdr->internal.result_points_generated;
- if(!high_rrdr->d) {
- info("Metric correlations: rrd2rrdr() did not return any dimensions on chart '%s'.", st->name);
+ if(r->d != 1) {
+ error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu", r->internal.qt->id, r->d);
goto cleanup;
}
- if(high_rrdr->result_options & RRDR_RESULT_OPTION_CANCEL) {
- info("Metric correlations: rrd2rrdr() on highlighted window timed out '%s'.", st->name);
+
+ if(unlikely(r->od[0] & RRDR_DIMENSION_HIDDEN))
goto cleanup;
- }
- int high_points = rrdr_rows(high_rrdr);
- usec_t now_usec = now_realtime_usec();
- if(now_usec - started_usec > timeout * USEC_PER_MS)
+ if(unlikely(!(r->od[0] & RRDR_DIMENSION_NONZERO)))
goto cleanup;
- // get the baseline, requesting the same number of points as the highlight
- stats->db_queries++;
- base_rrdr = rrd2rrdr(owa, st,high_points << shifts,
- baseline_after, baseline_before, group,
- group_time, options, NULL, context_param_list, group_options,
- (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)), tier);
- if(!base_rrdr) {
- info("Metric correlations: rrd2rrdr() failed for the baseline window on chart '%s'.", st->name);
+ if(rrdr_rows(r) < 2)
goto cleanup;
- }
- for(int i = 0; i < storage_tiers ;i++)
- stats->db_points_per_tier[i] += base_rrdr->internal.tier_points_read[i];
+ *entries = rrdr_rows(r);
+ ret = onewayalloc_mallocz(owa, sizeof(NETDATA_DOUBLE) * rrdr_rows(r));
- stats->db_points += base_rrdr->internal.db_points_read;
- stats->result_points += base_rrdr->internal.result_points_generated;
- if(!base_rrdr->d) {
- info("Metric correlations: rrd2rrdr() did not return any dimensions on chart '%s'.", st->name);
- goto cleanup;
- }
- if (base_rrdr->d != high_rrdr->d) {
- info("Cannot generate metric correlations for chart '%s' when the baseline and the highlight have different number of dimensions.", st->name);
- goto cleanup;
- }
- if(base_rrdr->result_options & RRDR_RESULT_OPTION_CANCEL) {
- info("Metric correlations: rrd2rrdr() on baseline window timed out '%s'.", st->name);
- goto cleanup;
- }
- int base_points = rrdr_rows(base_rrdr);
+ // copy the points of the dimension to a contiguous array
+ // there is no need to check for empty values, since empty values are already zero
+ // https://github.com/netdata/netdata/blob/6e3144683a73a2024d51425b20ecfd569034c858/web/api/queries/average/average.c#L41-L43
+ memcpy(ret, r->v, rrdr_rows(r) * sizeof(NETDATA_DOUBLE));
- now_usec = now_realtime_usec();
- if(now_usec - started_usec > timeout * USEC_PER_MS)
- goto cleanup;
+cleanup:
+ rrdr_free(owa, r);
+ return ret;
+}
+
+static void rrdset_metric_correlations_ks2(
+ RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ DICTIONARY *results,
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ size_t points, RRDR_OPTIONS options,
+ RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ uint32_t shifts,
+ WEIGHTS_STATS *stats, bool register_zero
+ ) {
+
+ options |= RRDR_OPTION_NATURAL_POINTS;
- // we need at least 2 points to do the job
- if(base_points < 2 || high_points < 2)
+ ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
+
+ size_t high_points = 0;
+ NETDATA_DOUBLE *highlight = rrd2rrdr_ks2(
+ owa, host, rca, ria, rma, after, before, points,
+ options, group_method, group_options, tier, stats, &high_points);
+
+ if(!highlight)
goto cleanup;
- // for each dimension
- RRDDIM *d;
- int i;
- for(i = 0, d = base_rrdr->st->dimensions ; d && i < base_rrdr->d; i++, d = d->next) {
+ size_t base_points = 0;
+ NETDATA_DOUBLE *baseline = rrd2rrdr_ks2(
+ owa, host, rca, ria, rma, baseline_after, baseline_before, high_points << shifts,
+ options, group_method, group_options, tier, stats, &base_points);
+
+ if(!baseline)
+ goto cleanup;
- // skip the not evaluated ones
- if(unlikely(base_rrdr->od[i] & RRDR_DIMENSION_HIDDEN) || (high_rrdr->od[i] & RRDR_DIMENSION_HIDDEN))
- continue;
+ stats->binary_searches += 2 * (base_points - 1) + 2 * (high_points - 1);
- examined_dimensions++;
+ double prob = kstwo(baseline, (int)base_points, highlight, (int)high_points, shifts);
+ if(!isnan(prob) && !isinf(prob)) {
- // skip the dimensions that are just zero for both the baseline and the highlight
- if(unlikely(!(base_rrdr->od[i] & RRDR_DIMENSION_NONZERO) && !(high_rrdr->od[i] & RRDR_DIMENSION_NONZERO)))
- continue;
-
- // copy the baseline points of the dimension to a contiguous array
- // there is no need to check for empty values, since empty are already zero
- NETDATA_DOUBLE baseline[base_points];
- for(int c = 0; c < base_points; c++)
- baseline[c] = base_rrdr->v[ c * base_rrdr->d + i ];
-
- // copy the highlight points of the dimension to a contiguous array
- // there is no need to check for empty values, since empty values are already zero
- // https://github.com/netdata/netdata/blob/6e3144683a73a2024d51425b20ecfd569034c858/web/api/queries/average/average.c#L41-L43
- NETDATA_DOUBLE highlight[high_points];
- for(int c = 0; c < high_points; c++)
- highlight[c] = high_rrdr->v[ c * high_rrdr->d + i ];
-
- stats->binary_searches += 2 * (base_points - 1) + 2 * (high_points - 1);
-
- double prob = kstwo(baseline, base_points, highlight, high_points, shifts);
- if(!isnan(prob) && !isinf(prob)) {
-
- // these conditions should never happen, but still let's check
- if(unlikely(prob < 0.0)) {
- error("Metric correlations: kstwo() returned a negative number: %f", prob);
- prob = -prob;
- }
- if(unlikely(prob > 1.0)) {
- error("Metric correlations: kstwo() returned a number above 1.0: %f", prob);
- prob = 1.0;
- }
-
- // to spread the results evenly, 0.0 needs to be the less correlated and 1.0 the most correlated
- // so we flip the result of kstwo()
- register_result(results, base_rrdr->st, d, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, stats, register_zero);
+ // these conditions should never happen, but still let's check
+ if(unlikely(prob < 0.0)) {
+ error("Metric correlations: kstwo() returned a negative number: %f", prob);
+ prob = -prob;
+ }
+ if(unlikely(prob > 1.0)) {
+ error("Metric correlations: kstwo() returned a number above 1.0: %f", prob);
+ prob = 1.0;
}
+
+ // to spread the results evenly, 0.0 needs to be the less correlated and 1.0 the most correlated
+ // so, we flip the result of kstwo()
+ register_result(results, rca, ria, rma, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, stats, register_zero);
}
cleanup:
- rrdr_free(owa, high_rrdr);
- rrdr_free(owa, base_rrdr);
onewayalloc_destroy(owa);
- return examined_dimensions;
}
// ----------------------------------------------------------------------------
// VOLUME algorithm functions
-static int rrdset_metric_correlations_volume(RRDSET *st, DICTIONARY *results,
- long long baseline_after, long long baseline_before,
- long long after, long long before,
- RRDR_OPTIONS options, RRDR_GROUPING group, const char *group_options,
- int tier, int timeout,
- WEIGHTS_STATS *stats, bool register_zero) {
+static void merge_query_value_to_stats(QUERY_VALUE *qv, WEIGHTS_STATS *stats) {
+ stats->db_queries++;
+ stats->result_points += qv->result_points;
+ stats->db_points += qv->points_read;
+ for(size_t tier = 0; tier < storage_tiers ; tier++)
+ stats->db_points_per_tier[tier] += qv->storage_points_per_tier[tier];
+}
+
+static void rrdset_metric_correlations_volume(
+ RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ DICTIONARY *results,
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier,
+ WEIGHTS_STATS *stats, bool register_zero) {
options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ABSOLUTE | RRDR_OPTION_NATURAL_POINTS;
- long group_time = 0;
-
- int examined_dimensions = 0;
- int ret, value_is_null;
- usec_t started_usec = now_realtime_usec();
- RRDDIM *d;
- for(d = st->dimensions; d ; d = d->next) {
- usec_t now_usec = now_realtime_usec();
- if(now_usec - started_usec > timeout * USEC_PER_MS)
- return examined_dimensions;
+ QUERY_VALUE baseline_average = rrdmetric2value(host, rca, ria, rma, baseline_after, baseline_before, options, group_method, group_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&baseline_average, stats);
- // we count how many metrics we evaluated
- examined_dimensions++;
+ if(!netdata_double_isnumber(baseline_average.value)) {
+ // this means no data for the baseline window, but we may have data for the highlighted one - assume zero
+ baseline_average.value = 0.0;
+ }
- // there is no point to pass a timeout to these queries
- // since the query engine checks for a timeout between
- // dimensions, and we query a single dimension at a time.
-
- stats->db_queries++;
- NETDATA_DOUBLE baseline_average = NAN;
- NETDATA_DOUBLE base_anomaly_rate = 0;
- value_is_null = 1;
- ret = rrdset2value_api_v1(st, NULL, &baseline_average, d->id, 1,
- baseline_after, baseline_before,
- group, group_options, group_time, options,
- NULL, NULL,
- &stats->db_points, stats->db_points_per_tier,
- &stats->result_points,
- &value_is_null, &base_anomaly_rate, 0, tier);
-
- if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(baseline_average)) {
- // this means no data for the baseline window, but we may have data for the highlighted one - assume zero
- baseline_average = 0.0;
- }
+ QUERY_VALUE highlight_average = rrdmetric2value(host, rca, ria, rma, after, before, options, group_method, group_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&highlight_average, stats);
- stats->db_queries++;
- NETDATA_DOUBLE highlight_average = NAN;
- NETDATA_DOUBLE high_anomaly_rate = 0;
- value_is_null = 1;
- ret = rrdset2value_api_v1(st, NULL, &highlight_average, d->id, 1,
- after, before,
- group, group_options, group_time, options,
- NULL, NULL,
- &stats->db_points, stats->db_points_per_tier,
- &stats->result_points,
- &value_is_null, &high_anomaly_rate, 0, tier);
-
- if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(highlight_average)) {
- // this means no data for the highlighted duration - so skip it
- continue;
- }
+ if(!netdata_double_isnumber(highlight_average.value))
+ return;
- if(baseline_average == highlight_average) {
- // they are the same - let's move on
- continue;
- }
+ if(baseline_average.value == highlight_average.value) {
+ // they are the same - let's move on
+ return;
+ }
- stats->db_queries++;
- NETDATA_DOUBLE highlight_countif = NAN;
- value_is_null = 1;
-
- char highlighted_countif_options[50 + 1];
- snprintfz(highlighted_countif_options, 50, "%s" NETDATA_DOUBLE_FORMAT, highlight_average < baseline_average ? "<":">", baseline_average);
-
- ret = rrdset2value_api_v1(st, NULL, &highlight_countif, d->id, 1,
- after, before,
- RRDR_GROUPING_COUNTIF,highlighted_countif_options,
- group_time, options,
- NULL, NULL,
- &stats->db_points, stats->db_points_per_tier,
- &stats->result_points,
- &value_is_null, NULL, 0, tier);
-
- if(ret != HTTP_RESP_OK || value_is_null || !netdata_double_isnumber(highlight_countif)) {
- info("MC: highlighted countif query failed, but highlighted average worked - strange...");
- continue;
- }
+ char highlight_countif_options[50 + 1];
+ snprintfz(highlight_countif_options, 50, "%s" NETDATA_DOUBLE_FORMAT, highlight_average.value < baseline_average.value ? "<" : ">", baseline_average.value);
+ QUERY_VALUE highlight_countif = rrdmetric2value(host, rca, ria, rma, after, before, options, RRDR_GROUPING_COUNTIF, highlight_countif_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&highlight_countif, stats);
- // this represents the percentage of time
- // the highlighted window was above/below the baseline window
- // (above or below depending on their averages)
- highlight_countif = highlight_countif / 100.0; // countif returns 0 - 100.0
+ if(!netdata_double_isnumber(highlight_countif.value)) {
+ info("WEIGHTS: highlighted countif query failed, but highlighted average worked - strange...");
+ return;
+ }
- RESULT_FLAGS flags;
- NETDATA_DOUBLE pcent = NAN;
- if(isgreater(baseline_average, 0.0) || isless(baseline_average, 0.0)) {
- flags = RESULT_IS_BASE_HIGH_RATIO;
- pcent = (highlight_average - baseline_average) / baseline_average * highlight_countif;
- }
- else {
- flags = RESULT_IS_PERCENTAGE_OF_TIME;
- pcent = highlight_countif;
- }
+ // this represents the percentage of time
+ // the highlighted window was above/below the baseline window
+ // (above or below depending on their averages)
+ highlight_countif.value = highlight_countif.value / 100.0; // countif returns 0 - 100.0
- register_result(results, st, d, pcent, flags, stats, register_zero);
+ RESULT_FLAGS flags;
+ NETDATA_DOUBLE pcent = NAN;
+ if(isgreater(baseline_average.value, 0.0) || isless(baseline_average.value, 0.0)) {
+ flags = RESULT_IS_BASE_HIGH_RATIO;
+ pcent = (highlight_average.value - baseline_average.value) / baseline_average.value * highlight_countif.value;
+ }
+ else {
+ flags = RESULT_IS_PERCENTAGE_OF_TIME;
+ pcent = highlight_countif.value;
}
- return examined_dimensions;
+ register_result(results, rca, ria, rma, pcent, flags, stats, register_zero);
}
// ----------------------------------------------------------------------------
// ANOMALY RATE algorithm functions
-static int rrdset_weights_anomaly_rate(RRDSET *st, DICTIONARY *results,
- long long after, long long before,
- RRDR_OPTIONS options, RRDR_GROUPING group, const char *group_options,
- int tier, int timeout,
- WEIGHTS_STATS *stats, bool register_zero) {
+static void rrdset_weights_anomaly_rate(
+ RRDHOST *host,
+ RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
+ DICTIONARY *results,
+ time_t after, time_t before,
+ RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ size_t tier,
+ WEIGHTS_STATS *stats, bool register_zero) {
options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ANOMALY_BIT | RRDR_OPTION_NATURAL_POINTS;
- long group_time = 0;
- int examined_dimensions = 0;
- int ret, value_is_null;
- usec_t started_usec = now_realtime_usec();
-
- RRDDIM *d;
- for(d = st->dimensions; d ; d = d->next) {
- usec_t now_usec = now_realtime_usec();
- if(now_usec - started_usec > timeout * USEC_PER_MS)
- return examined_dimensions;
+ QUERY_VALUE qv = rrdmetric2value(host, rca, ria, rma, after, before, options, group_method, group_options, tier, 0, QUERY_SOURCE_API_WEIGHTS);
+ merge_query_value_to_stats(&qv, stats);
- // we count how many metrics we evaluated
- examined_dimensions++;
-
- // there is no point to pass a timeout to these queries
- // since the query engine checks for a timeout between
- // dimensions, and we query a single dimension at a time.
-
- stats->db_queries++;
- NETDATA_DOUBLE average = NAN;
- NETDATA_DOUBLE anomaly_rate = 0;
- value_is_null = 1;
- ret = rrdset2value_api_v1(st, NULL, &average, d->id, 1,
- after, before,
- group, group_options, group_time, options,
- NULL, NULL,
- &stats->db_points, stats->db_points_per_tier,
- &stats->result_points,
- &value_is_null, &anomaly_rate, 0, tier);
-
- if(ret == HTTP_RESP_OK || !value_is_null || netdata_double_isnumber(average))
- register_result(results, st, d, average, 0, stats, register_zero);
- }
-
- return examined_dimensions;
+ if(netdata_double_isnumber(qv.value))
+ register_result(results, rca, ria, rma, qv.value, 0, stats, register_zero);
}
// ----------------------------------------------------------------------------
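A worked example of the volume weight computed above, with hypothetical numbers: if a dimension averages 10 over the baseline window, 15 over the highlighted window, and countif reports that the highlighted window was above the baseline average 80% of the time, the registered weight is:

    baseline_average  = 10
    highlight_average = 15
    highlight_countif = 80 / 100.0 = 0.80
    weight = (15 - 10) / 10 * 0.80 = 0.40      (flags = RESULT_IS_BASE_HIGH_RATIO)

When the baseline average is exactly zero the ratio is undefined, so the percentage of time itself is registered instead (RESULT_IS_PERCENTAGE_OF_TIME).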
@@ -853,7 +742,7 @@ static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
struct register_result *t;
// count the dimensions
- size_t dimensions = dictionary_stats_entries(results);
+ size_t dimensions = dictionary_entries(results);
if(!dimensions) return 0;
if(stats->max_base_high_ratio == 0.0)
@@ -903,15 +792,17 @@ static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
// ----------------------------------------------------------------------------
// The main function
-int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
- RRDR_GROUPING group, const char *group_options,
- long long baseline_after, long long baseline_before,
- long long after, long long before,
- long long points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, int tier, int timeout) {
+int web_api_v1_weights(
+ RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+ RRDR_GROUPING group, const char *group_options,
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ size_t points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, size_t tier, size_t timeout) {
+
WEIGHTS_STATS stats = {};
DICTIONARY *results = register_result_init();
- DICTIONARY *charts = dictionary_create(DICTIONARY_FLAG_SINGLE_THREADED|DICTIONARY_FLAG_VALUE_LINK_DONT_CLONE);;
+ DICTIONARY *metrics = NULL;
char *error = NULL;
int resp = HTTP_RESP_OK;
@@ -1000,20 +891,7 @@ int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS
baseline_after = baseline_before - (high_delta << shifts);
}
- // dont lock here and wait for results
- // get the charts and run mc after
- RRDSET *st;
- rrdhost_rdlock(host);
- rrdset_foreach_read(st, host) {
- if (rrdset_is_available_for_viewers(st)) {
- if(!contexts || simple_pattern_matches(contexts, st->context))
- dictionary_set(charts, st->name, NULL, 0);
- }
- }
- rrdhost_unlock(host);
-
size_t examined_dimensions = 0;
- void *ptr;
bool register_zero = true;
if(options & RRDR_OPTION_NONZERO) {
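The shifts logic above keeps the two windows comparable for the KS2 method: the baseline window is stretched by a factor of 1 << shifts and, in rrdset_metric_correlations_ks2(), queried with the same factor more points, so both queries end up with the same effective resolution. A hypothetical example:

    highlight: 300 s window,  300 points   ->  1 point per second
    shifts = 2 (factor 4)
    baseline : 1200 s window, 1200 points  ->  1 point per second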
@@ -1021,8 +899,11 @@ int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS
options &= ~RRDR_OPTION_NONZERO;
}
- // for every chart in the dictionary
- dfe_start_read(charts, ptr) {
+ metrics = rrdcontext_all_metrics_to_dict(host, contexts);
+ struct metric_entry *me;
+
+ // for every metric_entry in the dictionary
+ dfe_start_read(metrics, me) {
usec_t now_usec = now_realtime_usec();
if(now_usec - started_usec > timeout_usec) {
error = "timed out";
@@ -1030,46 +911,48 @@ int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS
goto cleanup;
}
- st = rrdset_find_byname(host, ptr_name);
- if(!st) continue;
-
- rrdset_rdlock(st);
+ examined_dimensions++;
switch(method) {
case WEIGHTS_METHOD_ANOMALY_RATE:
options |= RRDR_OPTION_ANOMALY_BIT;
- points = 1;
- examined_dimensions += rrdset_weights_anomaly_rate(st, results,
- after, before,
- options, group, group_options, tier,
- (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
- &stats, register_zero);
+ rrdset_weights_anomaly_rate(
+ host,
+ me->rca, me->ria, me->rma,
+ results,
+ after, before,
+ options, group, group_options, tier,
+ &stats, register_zero
+ );
break;
case WEIGHTS_METHOD_MC_VOLUME:
- points = 1;
- examined_dimensions += rrdset_metric_correlations_volume(st, results,
- baseline_after, baseline_before,
- after, before,
- options, group, group_options, tier,
- (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
- &stats, register_zero);
+ rrdset_metric_correlations_volume(
+ host,
+ me->rca, me->ria, me->rma,
+ results,
+ baseline_after, baseline_before,
+ after, before,
+ options, group, group_options, tier,
+ &stats, register_zero
+ );
break;
default:
case WEIGHTS_METHOD_MC_KS2:
- examined_dimensions += rrdset_metric_correlations_ks2(st, results,
- baseline_after, baseline_before,
- after, before,
- points, options, group, group_options, tier, shifts,
- (int)(timeout - ((now_usec - started_usec) / USEC_PER_MS)),
- &stats, register_zero);
+ rrdset_metric_correlations_ks2(
+ host,
+ me->rca, me->ria, me->rma,
+ results,
+ baseline_after, baseline_before,
+ after, before, points,
+ options, group, group_options, tier, shifts,
+ &stats, register_zero
+ );
break;
}
-
- rrdset_unlock(st);
}
- dfe_done(ptr);
+ dfe_done(me);
if(!register_zero)
options |= RRDR_OPTION_NONZERO;
@@ -1085,22 +968,26 @@ int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS
size_t added_dimensions = 0;
switch(format) {
case WEIGHTS_FORMAT_CHARTS:
- added_dimensions = registered_results_to_json_charts(results, wb,
- after, before,
- baseline_after, baseline_before,
- points, method, group, options, shifts,
- examined_dimensions,
- ended_usec - started_usec, &stats);
+ added_dimensions =
+ registered_results_to_json_charts(
+ results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
break;
default:
case WEIGHTS_FORMAT_CONTEXTS:
- added_dimensions = registered_results_to_json_contexts(results, wb,
- after, before,
- baseline_after, baseline_before,
- points, method, group, options, shifts,
- examined_dimensions,
- ended_usec - started_usec, &stats);
+ added_dimensions =
+ registered_results_to_json_contexts(
+ results, wb,
+ after, before,
+ baseline_after, baseline_before,
+ points, method, group, options, shifts,
+ examined_dimensions,
+ ended_usec - started_usec, &stats);
break;
}
@@ -1110,7 +997,7 @@ int web_api_v1_weights(RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS
}
cleanup:
- if(charts) dictionary_destroy(charts);
+ if(metrics) dictionary_destroy(metrics);
if(results) register_result_destroy(results);
if(error) {
diff --git a/web/api/queries/weights.h b/web/api/queries/weights.h
index f88a134f2..50d8634ef 100644
--- a/web/api/queries/weights.h
+++ b/web/api/queries/weights.h
@@ -20,14 +20,14 @@ extern int enable_metric_correlations;
extern int metric_correlations_version;
extern WEIGHTS_METHOD default_metric_correlations_method;
-extern int web_api_v1_weights (RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
+int web_api_v1_weights (RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
RRDR_GROUPING group, const char *group_options,
- long long baseline_after, long long baseline_before,
- long long after, long long before,
- long long points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, int tier, int timeout);
+ time_t baseline_after, time_t baseline_before,
+ time_t after, time_t before,
+ size_t points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, size_t tier, size_t timeout);
-extern WEIGHTS_METHOD weights_string_to_method(const char *method);
-extern const char *weights_method_to_string(WEIGHTS_METHOD method);
-extern int mc_unittest(void);
+WEIGHTS_METHOD weights_string_to_method(const char *method);
+const char *weights_method_to_string(WEIGHTS_METHOD method);
+int mc_unittest(void);
#endif //NETDATA_API_WEIGHTS_H
diff --git a/web/api/tests/valid_urls.c b/web/api/tests/valid_urls.c
index 91cd19b09..8a2a87f10 100644
--- a/web/api/tests/valid_urls.c
+++ b/web/api/tests/valid_urls.c
@@ -19,19 +19,6 @@ void *__wrap_free_temporary_host(RRDHOST *host)
return NULL;
}
-
-RRDHOST *sql_create_host_by_uuid(char *hostname)
-{
- (void) hostname;
- return NULL;
-}
-
-RRDHOST *__wrap_sql_create_host_by_uuid(char *hostname)
-{
- (void) hostname;
- return NULL;
-}
-
void repr(char *result, int result_size, char const *buf, int size)
{
int n;
diff --git a/web/api/tests/web_api.c b/web/api/tests/web_api.c
index fd9a86ef6..93e6454ee 100644
--- a/web/api/tests/web_api.c
+++ b/web/api/tests/web_api.c
@@ -19,18 +19,6 @@ void *__wrap_free_temporary_host(RRDHOST *host)
return NULL;
}
-RRDHOST *sql_create_host_by_uuid(char *hostname)
-{
- (void) hostname;
- return NULL;
-}
-
-RRDHOST *__wrap_sql_create_host_by_uuid(char *hostname)
-{
- (void) hostname;
- return NULL;
-}
-
void repr(char *result, int result_size, char const *buf, int size)
{
int n;
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
index 8bfc617fd..93f501f9e 100644
--- a/web/api/web_api_v1.c
+++ b/web/api/web_api_v1.c
@@ -34,20 +34,20 @@ static struct {
, {"match-ids" , 0 , RRDR_OPTION_MATCH_IDS}
, {"match_names" , 0 , RRDR_OPTION_MATCH_NAMES}
, {"match-names" , 0 , RRDR_OPTION_MATCH_NAMES}
- , {"showcustomvars" , 0 , RRDR_OPTION_CUSTOM_VARS}
, {"anomaly-bit" , 0 , RRDR_OPTION_ANOMALY_BIT}
, {"selected-tier" , 0 , RRDR_OPTION_SELECTED_TIER}
, {"raw" , 0 , RRDR_OPTION_RETURN_RAW}
, {"jw-anomaly-rates" , 0 , RRDR_OPTION_RETURN_JWAR}
, {"natural-points" , 0 , RRDR_OPTION_NATURAL_POINTS}
, {"virtual-points" , 0 , RRDR_OPTION_VIRTUAL_POINTS}
+ , {"all-dimensions" , 0 , RRDR_OPTION_ALL_DIMENSIONS}
, {NULL , 0 , 0}
};
static struct {
const char *name;
uint32_t hash;
- uint32_t value;
+ DATASOURCE_FORMAT value;
} api_v1_data_formats[] = {
{ DATASOURCE_FORMAT_DATATABLE_JSON , 0 , DATASOURCE_DATATABLE_JSON}
, {DATASOURCE_FORMAT_DATATABLE_JSONP, 0 , DATASOURCE_DATATABLE_JSONP}
@@ -68,7 +68,7 @@ static struct {
static struct {
const char *name;
uint32_t hash;
- uint32_t value;
+ DATASOURCE_FORMAT value;
} api_v1_data_google_formats[] = {
// this is not error - when google requests json, it expects javascript
// https://developers.google.com/chart/interactive/docs/dev/implementing_data_source#responseformat
@@ -185,20 +185,46 @@ inline RRDR_OPTIONS web_client_api_request_v1_data_options(char *o) {
return ret;
}
-void web_client_api_request_v1_data_options_to_string(BUFFER *wb, RRDR_OPTIONS options) {
+void web_client_api_request_v1_data_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options) {
RRDR_OPTIONS used = 0; // to prevent adding duplicates
int added = 0;
for(int i = 0; api_v1_data_options[i].name ; i++) {
if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
+ const char *name = api_v1_data_options[i].name;
+ used |= api_v1_data_options[i].value;
+
if(added) buffer_strcat(wb, ",");
- buffer_strcat(wb, api_v1_data_options[i].name);
+ buffer_strcat(wb, name);
+
+ added++;
+ }
+ }
+}
+
+void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options) {
+ char *write = buf;
+ char *end = &buf[size - 1];
+
+ RRDR_OPTIONS used = 0; // to prevent adding duplicates
+ int added = 0;
+ for(int i = 0; api_v1_data_options[i].name ; i++) {
+ if (unlikely((api_v1_data_options[i].value & options) && !(api_v1_data_options[i].value & used))) {
+ const char *name = api_v1_data_options[i].name;
used |= api_v1_data_options[i].value;
+
+ if(added && write < end)
+ *write++ = ',';
+
+ while(*name && write < end)
+ *write++ = *name++;
+
added++;
}
}
+ *write = *end = '\0';
}
-inline uint32_t web_client_api_request_v1_data_format(char *name) {
+inline DATASOURCE_FORMAT web_client_api_request_v1_data_format(char *name) {
uint32_t hash = simple_hash(name);
int i;
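The new fixed-buffer variant above mirrors web_client_api_request_v1_data_options_to_buffer() but writes into a caller-supplied char array, truncating safely instead of growing a BUFFER. A usage sketch (the flag combination and the resulting string are illustrative):

    char options_str[1024];
    web_client_api_request_v1_data_options_to_string(options_str, sizeof(options_str),
                                                     RRDR_OPTION_NONZERO | RRDR_OPTION_NATURAL_POINTS);
    /* options_str now holds something like "nonzero,natural-points" and is always
       NUL-terminated, even when the buffer is smaller than the full option list */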
@@ -584,16 +610,14 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
char *group_time_str = NULL;
char *points_str = NULL;
char *timeout_str = NULL;
- char *max_anomaly_rates_str = NULL;
char *context = NULL;
char *chart_label_key = NULL;
char *chart_labels_filter = NULL;
char *group_options = NULL;
- int tier = 0;
- int group = RRDR_GROUPING_AVERAGE;
- int show_dimensions = 0;
- uint32_t format = DATASOURCE_JSON;
- uint32_t options = 0x00000000;
+ size_t tier = 0;
+ RRDR_GROUPING group = RRDR_GROUPING_AVERAGE;
+ DATASOURCE_FORMAT format = DATASOURCE_JSON;
+ RRDR_OPTIONS options = 0;
while(url) {
char *value = mystrsep(&url, "&");
@@ -617,7 +641,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
buffer_strcat(dimensions, "|");
buffer_strcat(dimensions, value);
}
- else if(!strcmp(name, "show_dimensions")) show_dimensions = 1;
+ else if(!strcmp(name, "show_dimensions")) options |= RRDR_OPTION_ALL_DIMENSIONS;
else if(!strcmp(name, "after")) after_str = value;
else if(!strcmp(name, "before")) before_str = value;
else if(!strcmp(name, "points")) points_str = value;
@@ -670,13 +694,12 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
outFileName = tqx_value;
}
}
- else if(!strcmp(name, "max_anomaly_rates")) {
- max_anomaly_rates_str = value;
- }
else if(!strcmp(name, "tier")) {
- tier = str2i(value);
- if(tier >= 0 && tier < storage_tiers)
+ tier = str2ul(value);
+ if(tier < storage_tiers)
options |= RRDR_OPTION_SELECTED_TIER;
+ else
+ tier = 0;
}
}
@@ -690,81 +713,17 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
RRDSET *st = NULL;
ONEWAYALLOC *owa = onewayalloc_create(0);
+ QUERY_TARGET *qt = NULL;
- if((!chart || !*chart) && (!context)) {
- buffer_sprintf(w->response.data, "No chart id is given at the request.");
+ if(!is_valid_sp(chart) && !is_valid_sp(context)) {
+ buffer_sprintf(w->response.data, "No chart or context is given.");
goto cleanup;
}
- struct context_param *context_param_list = NULL;
-
- if (context && !chart) {
- RRDSET *st1;
-
- uint32_t context_hash = simple_hash(context);
-
- SIMPLE_PATTERN *chart_label_key_pattern = NULL;
- if(chart_label_key)
- chart_label_key_pattern = simple_pattern_create(chart_label_key, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
-
- SIMPLE_PATTERN *chart_labels_filter_pattern = NULL;
- if(chart_labels_filter)
- chart_labels_filter_pattern = simple_pattern_create(chart_labels_filter, ",|\t\r\n\f\v", SIMPLE_PATTERN_EXACT);
-
- rrdhost_rdlock(host);
- rrdset_foreach_read(st1, host) {
- if (st1->hash_context == context_hash && !strcmp(st1->context, context) &&
- (!chart_label_key_pattern || rrdlabels_match_simple_pattern_parsed(st1->state->chart_labels, chart_label_key_pattern, ':')) &&
- (!chart_labels_filter_pattern || rrdlabels_match_simple_pattern_parsed(st1->state->chart_labels, chart_labels_filter_pattern, ':')))
- build_context_param_list(owa, &context_param_list, st1);
- }
- rrdhost_unlock(host);
-
- if (likely(context_param_list && context_param_list->rd)) // Just set the first one
- st = context_param_list->rd->rrdset;
- else {
- if (!chart_label_key && !chart_labels_filter)
- sql_build_context_param_list(owa, &context_param_list, host, context, NULL);
- }
- }
- else {
+ if(chart && !context) {
+ // check if this is a specific chart
st = rrdset_find(host, chart);
- if (!st)
- st = rrdset_find_byname(host, chart);
- if (likely(st))
- st->last_accessed_time = now_realtime_sec();
- else
- sql_build_context_param_list(owa, &context_param_list, host, NULL, chart);
- }
-
- if (!st) {
- if (likely(context_param_list && context_param_list->rd && context_param_list->rd->rrdset))
- st = context_param_list->rd->rrdset;
- else {
- free_context_param_list(owa, &context_param_list);
- context_param_list = NULL;
- }
- }
-
- if (!st && !context_param_list) {
- if (context && !chart) {
- if (!chart_label_key) {
- buffer_strcat(w->response.data, "Context is not found: ");
- buffer_strcat_htmlescape(w->response.data, context);
- } else {
- buffer_strcat(w->response.data, "Context: ");
- buffer_strcat_htmlescape(w->response.data, context);
- buffer_strcat(w->response.data, " or chart label key: ");
- buffer_strcat_htmlescape(w->response.data, chart_label_key);
- buffer_strcat(w->response.data, " not found");
- }
- }
- else {
- buffer_strcat(w->response.data, "Chart is not found: ");
- buffer_strcat_htmlescape(w->response.data, chart);
- }
- ret = HTTP_RESP_NOT_FOUND;
- goto cleanup;
+ if (!st) st = rrdset_find_byname(host, chart);
}
long long before = (before_str && *before_str)?str2l(before_str):0;
@@ -772,7 +731,35 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
int points = (points_str && *points_str)?str2i(points_str):0;
int timeout = (timeout_str && *timeout_str)?str2i(timeout_str): 0;
long group_time = (group_time_str && *group_time_str)?str2l(group_time_str):0;
- int max_anomaly_rates = (max_anomaly_rates_str && *max_anomaly_rates_str) ? str2i(max_anomaly_rates_str) : 0;
+
+ QUERY_TARGET_REQUEST qtr = {
+ .after = after,
+ .before = before,
+ .host = host,
+ .st = st,
+ .hosts = NULL,
+ .contexts = context,
+ .charts = chart,
+ .dimensions = (dimensions)?buffer_tostring(dimensions):NULL,
+ .timeout = timeout,
+ .points = points,
+ .format = format,
+ .options = options,
+ .group_method = group,
+ .group_options = group_options,
+ .resampling_time = group_time,
+ .tier = tier,
+ .chart_label_key = chart_label_key,
+ .charts_labels_filter = chart_labels_filter,
+ .query_source = QUERY_SOURCE_API_DATA,
+ };
+ qt = query_target_create(&qtr);
+
+ if(!qt || !qt->query.used) {
+ buffer_sprintf(w->response.data, "No metrics where matched to query.");
+ ret = HTTP_RESP_NOT_FOUND;
+ goto cleanup;
+ }
if (timeout) {
struct timeval now;
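The request structure above replaces the long positional argument lists of the old rrdset2anything_api_v1() path: the handler now describes the query declaratively, turns it into a QUERY_TARGET, and hands that single object to the query engine. A condensed, hedged sketch of the lifecycle used here (field values are hypothetical; as the cleanup code further down shows, a target still marked used after the query must be released explicitly):

    QUERY_TARGET_REQUEST qtr = {
        .host = host,
        .charts = "system.cpu",              /* hypothetical chart pattern */
        .after = -600, .before = 0,          /* assumed relative window: last 10 minutes */
        .points = 60,
        .format = DATASOURCE_JSON,
        .group_method = RRDR_GROUPING_AVERAGE,
        .query_source = QUERY_SOURCE_API_DATA,
    };

    QUERY_TARGET *qt = query_target_create(&qtr);
    if(qt && qt->query.used)
        ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data);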
@@ -782,21 +769,13 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
if (timeout <= 0) {
buffer_flush(w->response.data);
buffer_strcat(w->response.data, "Query timeout exceeded");
- return HTTP_RESP_BACKEND_FETCH_FAILED;
+ ret = HTTP_RESP_BACKEND_FETCH_FAILED;
+ goto cleanup;
}
}
- debug(D_WEB_CLIENT, "%llu: API command 'data' for chart '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%d', format '%u', options '0x%08x'"
- , w->id
- , chart
- , (dimensions)?buffer_tostring(dimensions):""
- , after
- , before
- , points
- , group
- , format
- , options
- );
+ debug(D_WEB_CLIENT, "%llu: API command 'data' for chart '%s', dimensions '%s', after '%lld', before '%lld', points '%d', group '%u', format '%u', options '0x%08x'"
+ , w->id, chart, (dimensions)?buffer_tostring(dimensions):"", after, before , points, group, format, options);
if(outFileName && *outFileName) {
buffer_sprintf(w->response.header, "Content-Disposition: attachment; filename=\"%s\"\r\n", outFileName);
@@ -827,18 +806,7 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
buffer_strcat(w->response.data, "(");
}
- QUERY_PARAMS query_params = {
- .context_param_list = context_param_list,
- .timeout = timeout,
- .max_anomaly_rates = max_anomaly_rates,
- .show_dimensions = show_dimensions,
- .chart_label_key = chart_label_key,
- .wb = w->response.data};
-
- ret = rrdset2anything_api_v1(owa, st, &query_params, dimensions, format,
- points, after, before, group, group_options, group_time, options, &last_timestamp_in_data, tier);
-
- free_context_param_list(owa, &context_param_list);
+ ret = data_query_execute(owa, w->response.data, qt, &last_timestamp_in_data);
if(format == DATASOURCE_DATATABLE_JSONP) {
if(google_timestamp < last_timestamp_in_data)
@@ -856,6 +824,10 @@ inline int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, c
buffer_strcat(w->response.data, ");");
cleanup:
+ if(qt && qt->used) {
+ internal_error(true, "QUERY_TARGET: left non-released on query '%s'", qt->id);
+ query_target_release(qt);
+ }
onewayalloc_destroy(owa);
buffer_free(dimensions);
return ret;
@@ -1054,8 +1026,7 @@ inline int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *
static inline void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST *host, BUFFER *wb) {
int alarm_normal = 0, alarm_warn = 0, alarm_crit = 0;
RRDCALC *rc;
- rrdhost_rdlock(host);
- for(rc = host->alarms; rc ; rc = rc->next) {
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
continue;
@@ -1070,7 +1041,7 @@ static inline void web_client_api_request_v1_info_summary_alarm_statuses(RRDHOST
alarm_normal++;
}
}
- rrdhost_unlock(host);
+ foreach_rrdcalc_in_rrdhost_done(rc);
buffer_sprintf(wb, "\t\t\"normal\": %d,\n", alarm_normal);
buffer_sprintf(wb, "\t\t\"warning\": %d,\n", alarm_warn);
buffer_sprintf(wb, "\t\t\"critical\": %d\n", alarm_crit);
@@ -1086,7 +1057,7 @@ static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
if (count > 0)
buffer_strcat(wb, ",\n");
- buffer_sprintf(wb, "\t\t\"%s\"", host->hostname);
+ buffer_sprintf(wb, "\t\t\"%s\"", rrdhost_hostname(host));
count++;
}
@@ -1101,7 +1072,7 @@ static inline void web_client_api_request_v1_info_mirrored_hosts(BUFFER *wb) {
buffer_sprintf(
wb, "\t\t{ \"guid\": \"%s\", \"hostname\": \"%s\", \"reachable\": %s, \"hops\": %d"
, host->machine_guid
- , host->hostname
+ , rrdhost_hostname(host)
, (host->receiver || host == localhost) ? "true" : "false"
, host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1
);
@@ -1140,7 +1111,7 @@ inline void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation) {
indentation--;
}
- rrdlabels_to_buffer(host->host_labels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
+ rrdlabels_to_buffer(host->rrdlabels, wb, tabs, ":", "\"", ",\n", NULL, NULL, NULL, NULL);
buffer_strcat(wb, "\n");
}
@@ -1148,7 +1119,7 @@ extern int aclk_connected;
inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
{
buffer_strcat(wb, "{\n");
- buffer_sprintf(wb, "\t\"version\": \"%s\",\n", host->program_version);
+ buffer_sprintf(wb, "\t\"version\": \"%s\",\n", rrdhost_program_version(host));
buffer_sprintf(wb, "\t\"uid\": \"%s\",\n", host->machine_guid);
web_client_api_request_v1_info_mirrored_hosts(wb);
@@ -1202,6 +1173,10 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
host_labels2json(host, wb, 2);
buffer_strcat(wb, "\t},\n");
+ buffer_strcat(wb, "\t\"functions\": {\n");
+ host_functions2json(host, wb, 2, "\"", "\"");
+ buffer_strcat(wb, "\t},\n");
+
buffer_strcat(wb, "\t\"collectors\": [");
chartcollectors2json(host, wb);
buffer_strcat(wb, "\n\t],\n");
@@ -1215,14 +1190,8 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
#ifdef ENABLE_ACLK
buffer_strcat(wb, "\t\"cloud-available\": true,\n");
- buffer_strcat(wb, "\t\"aclk-ng-available\": true,\n");
- buffer_strcat(wb, "\t\"aclk-ng-new-cloud-protocol\": true,\n");
- buffer_strcat(wb, "\t\"aclk-legacy-available\": false,\n");
- buffer_strcat(wb, "\t\"aclk-implementation\": \"Next Generation\",\n");
#else
buffer_strcat(wb, "\t\"cloud-available\": false,\n");
- buffer_strcat(wb, "\t\"aclk-ng-available\": false,\n");
- buffer_strcat(wb, "\t\"aclk-legacy-available\": false,\n");
#endif
char *agent_id = get_agent_claimid();
if (agent_id == NULL)
@@ -1234,11 +1203,10 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
#ifdef ENABLE_ACLK
if (aclk_connected) {
buffer_strcat(wb, "\t\"aclk-available\": true,\n");
- buffer_strcat(wb, "\t\"aclk-available-protocol\": \"New\",\n");
}
else
#endif
- buffer_strcat(wb, "\t\"aclk-available\": false,\n\t\"aclk-available-protocol\": null,\n"); // Intentionally valid with/without #ifdef above
+ buffer_strcat(wb, "\t\"aclk-available\": false,\n"); // Intentionally valid with/without #ifdef above
buffer_strcat(wb, "\t\"memory-mode\": ");
analytics_get_data(analytics_data.netdata_config_memory_mode, wb);
@@ -1259,7 +1227,7 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
#ifdef ENABLE_COMPRESSION
if(host->sender){
buffer_strcat(wb, "\t\"stream-compression\": ");
- buffer_strcat(wb, (host->sender->rrdpush_compression ? "true" : "false"));
+ buffer_strcat(wb, stream_has_capability(host->sender, STREAM_CAP_COMPRESSION) ? "true" : "false");
buffer_strcat(wb, ",\n");
}else{
buffer_strcat(wb, "\t\"stream-compression\": null,\n");
@@ -1330,7 +1298,7 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
buffer_strcat(wb, "\t\"ml-info\": ");
buffer_strcat(wb, ml_info);
- free(ml_info);
+ freez(ml_info);
#endif
buffer_strcat(wb, "\n}");
@@ -1338,81 +1306,15 @@ inline int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb)
}
#if defined(ENABLE_ML)
-int web_client_api_request_v1_anomaly_events(RRDHOST *host, struct web_client *w, char *url) {
- if (!netdata_ready)
- return HTTP_RESP_BACKEND_FETCH_FAILED;
-
- uint32_t after = 0, before = 0;
-
- while (url) {
- char *value = mystrsep(&url, "&");
- if (!value || !*value)
- continue;
-
- char *name = mystrsep(&value, "=");
- if (!name || !*name)
- continue;
- if (!value || !*value)
- continue;
-
- if (!strcmp(name, "after"))
- after = (uint32_t) (strtoul(value, NULL, 0) / 1000);
- else if (!strcmp(name, "before"))
- before = (uint32_t) (strtoul(value, NULL, 0) / 1000);
- }
-
- char *s;
- if (!before || !after)
- s = strdupz("{\"error\": \"missing after/before parameters\" }\n");
- else {
- s = ml_get_anomaly_events(host, "AD1", 1, after, before);
- if (!s)
- s = strdupz("{\"error\": \"json string is empty\" }\n");
- }
-
- BUFFER *wb = w->response.data;
- buffer_flush(wb);
-
- wb->contenttype = CT_APPLICATION_JSON;
- buffer_strcat(wb, s);
- buffer_no_cacheable(wb);
-
- freez(s);
-
- return HTTP_RESP_OK;
-}
+int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char *url) {
+ (void) url;
-int web_client_api_request_v1_anomaly_event_info(RRDHOST *host, struct web_client *w, char *url) {
if (!netdata_ready)
return HTTP_RESP_BACKEND_FETCH_FAILED;
- uint32_t after = 0, before = 0;
-
- while (url) {
- char *value = mystrsep(&url, "&");
- if (!value || !*value)
- continue;
-
- char *name = mystrsep(&value, "=");
- if (!name || !*name)
- continue;
- if (!value || !*value)
- continue;
-
- if (!strcmp(name, "after"))
- after = (uint32_t) strtoul(value, NULL, 0);
- else if (!strcmp(name, "before"))
- before = (uint32_t) strtoul(value, NULL, 0);
- }
-
- char *s;
- if (!before || !after)
- s = strdupz("{\"error\": \"missing after/before parameters\" }\n");
- else {
- s = ml_get_anomaly_event_info(host, "AD1", 1, after, before);
- if (!s)
- s = strdupz("{\"error\": \"json string is empty\" }\n");
- }
+ char *s = ml_get_host_runtime_info(host);
+ if (!s)
+ s = strdupz("{\"error\": \"json string is empty\" }\n");
BUFFER *wb = w->response.data;
buffer_flush(wb);
@@ -1424,13 +1326,13 @@ int web_client_api_request_v1_anomaly_event_info(RRDHOST *host, struct web_clien
return HTTP_RESP_OK;
}
-int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char *url) {
+int web_client_api_request_v1_ml_models(RRDHOST *host, struct web_client *w, char *url) {
(void) url;
if (!netdata_ready)
return HTTP_RESP_BACKEND_FETCH_FAILED;
- char *s = ml_get_host_runtime_info(host);
+ char *s = ml_get_host_models(host);
if (!s)
s = strdupz("{\"error\": \"json string is empty\" }\n");
@@ -1443,8 +1345,7 @@ int web_client_api_request_v1_ml_info(RRDHOST *host, struct web_client *w, char
freez(s);
return HTTP_RESP_OK;
}
-
-#endif // defined(ENABLE_ML)
+#endif
inline int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url) {
(void)url;
@@ -1485,7 +1386,7 @@ static int web_client_api_request_v1_weights_internal(RRDHOST *host, struct web_
int options_count = 0;
RRDR_GROUPING group = RRDR_GROUPING_AVERAGE;
int timeout = 0;
- int tier = 0;
+ size_t tier = 0;
const char *group_options = NULL, *contexts_str = NULL;
while (url) {
@@ -1533,9 +1434,11 @@ static int web_client_api_request_v1_weights_internal(RRDHOST *host, struct web_
contexts_str = value;
else if(!strcmp(name, "tier")) {
- tier = str2i(value);
- if(tier >= 0 && tier < storage_tiers)
+ tier = str2ul(value);
+ if(tier < storage_tiers)
options |= RRDR_OPTION_SELECTED_TIER;
+ else
+ tier = 0;
}
}
@@ -1559,12 +1462,59 @@ int web_client_api_request_v1_weights(RRDHOST *host, struct web_client *w, char
return web_client_api_request_v1_weights_internal(host, w, url, WEIGHTS_METHOD_ANOMALY_RATE, WEIGHTS_FORMAT_CONTEXTS);
}
+int web_client_api_request_v1_function(RRDHOST *host, struct web_client *w, char *url) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ int timeout = 0;
+ const char *function = NULL;
+
+ while (url) {
+ char *value = mystrsep(&url, "&");
+ if (!value || !*value)
+ continue;
+
+ char *name = mystrsep(&value, "=");
+ if (!name || !*name)
+ continue;
+
+ if (!strcmp(name, "function"))
+ function = value;
+
+ else if (!strcmp(name, "timeout"))
+ timeout = (int) strtoul(value, NULL, 0);
+ }
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(wb);
+
+ return rrd_call_function_and_wait(host, wb, timeout, function);
+}
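
A hypothetical client-side call for the new function endpoint above, assuming a local agent on its default port 19999, libcurl, and a collector function named "processes"; the function name and the unit of timeout are assumptions, not part of this hunk.

#include <stdio.h>
#include <curl/curl.h>

int main(void) {
    CURL *curl = curl_easy_init();
    if (!curl) return 1;

    /* "function" selects the collector function to run; "timeout" is parsed as
     * an integer by the handler above (its unit is not shown in this hunk). */
    curl_easy_setopt(curl, CURLOPT_URL,
                     "http://localhost:19999/api/v1/function?function=processes&timeout=10");
    CURLcode rc = curl_easy_perform(curl);    /* libcurl writes the body to stdout by default */

    curl_easy_cleanup(curl);
    return rc == CURLE_OK ? 0 : 1;
}
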
+
+int web_client_api_request_v1_functions(RRDHOST *host, struct web_client *w, char *url __maybe_unused) {
+ if (!netdata_ready)
+ return HTTP_RESP_BACKEND_FETCH_FAILED;
+
+ BUFFER *wb = w->response.data;
+ buffer_flush(wb);
+ wb->contenttype = CT_APPLICATION_JSON;
+ buffer_no_cacheable(wb);
+
+ buffer_strcat(wb, "{\n");
+ host_functions2json(host, wb, 1, "\"", "\"");
+ buffer_strcat(wb, "}");
+
+ return HTTP_RESP_OK;
+}
+
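
The functions handler wraps host_functions2json() output in a single JSON object keyed by function name. A small sketch of that response shape, with made-up function names and the per-function metadata elided:

#include <stdio.h>

int main(void) {
    /* hypothetical function names; the real list comes from host_functions2json() */
    const char *functions[] = { "processes", "streaming" };
    const size_t n = sizeof(functions) / sizeof(functions[0]);

    printf("{\n");
    for (size_t i = 0; i < n; i++) {
        /* the real agent emits per-function metadata inside each object */
        printf("\t\"%s\": {}%s\n", functions[i], (i + 1 < n) ? "," : "");
    }
    printf("}\n");
    return 0;
}
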
#ifndef ENABLE_DBENGINE
int web_client_api_request_v1_dbengine_stats(RRDHOST *host, struct web_client *w, char *url) {
return HTTP_RESP_NOT_FOUND;
}
#else
-static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, int tier) {
+static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, size_t tier) {
RRDENG_SIZE_STATS stats = rrdeng_size_statistics(multidb_ctx[tier]);
buffer_sprintf(wb,
@@ -1588,11 +1538,11 @@ static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, int tier) {
",\n\t\t\"metrics_pages\":%zu"
",\n\t\t\"extents_compressed_bytes\":%zu"
",\n\t\t\"pages_uncompressed_bytes\":%zu"
- ",\n\t\t\"pages_duration_secs\":%ld"
+ ",\n\t\t\"pages_duration_secs\":%lld"
",\n\t\t\"single_point_pages\":%zu"
",\n\t\t\"first_t\":%llu"
",\n\t\t\"last_t\":%llu"
- ",\n\t\t\"database_retention_secs\":%ld"
+ ",\n\t\t\"database_retention_secs\":%lld"
",\n\t\t\"average_compression_savings\":%0.2f"
",\n\t\t\"average_point_duration_secs\":%0.2f"
",\n\t\t\"average_metric_retention_secs\":%0.2f"
@@ -1623,11 +1573,11 @@ static void web_client_api_v1_dbengine_stats_for_tier(BUFFER *wb, int tier) {
, stats.metrics_pages
, stats.extents_compressed_bytes
, stats.pages_uncompressed_bytes
- , stats.pages_duration_secs
+ , (long long)stats.pages_duration_secs
, stats.single_point_pages
, stats.first_t
, stats.last_t
- , stats.database_retention_secs
+ , (long long)stats.database_retention_secs
, stats.average_compression_savings
, stats.average_point_duration_secs
, stats.average_metric_retention_secs
@@ -1646,12 +1596,17 @@ int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struc
BUFFER *wb = w->response.data;
buffer_flush(wb);
+
+ if(!dbengine_enabled) {
+ buffer_strcat(wb, "dbengine is not enabled");
+ return HTTP_RESP_NOT_FOUND;
+ }
+
wb->contenttype = CT_APPLICATION_JSON;
buffer_no_cacheable(wb);
-
buffer_strcat(wb, "{");
- for(int tier = 0; tier < storage_tiers ;tier++) {
- buffer_sprintf(wb, "%s\n\t\"tier%d\": {", tier?",":"", tier);
+ for(size_t tier = 0; tier < storage_tiers ;tier++) {
+ buffer_sprintf(wb, "%s\n\t\"tier%zu\": {", tier?",":"", tier);
web_client_api_v1_dbengine_stats_for_tier(wb, tier);
buffer_strcat(wb, "\n\t}");
}
@@ -1661,48 +1616,56 @@ int web_client_api_request_v1_dbengine_stats(RRDHOST *host __maybe_unused, struc
}
#endif
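
The format-string changes above print time_t-derived fields through an explicit (long long) cast with %lld, because the width of time_t differs across platforms while long long is always at least 64 bits. A minimal illustration:

#include <stdio.h>
#include <time.h>

int main(void) {
    time_t retention = (time_t)86400 * 30;       /* e.g. 30 days of retention */
    /* Portable: cast explicitly instead of guessing the width of time_t. */
    printf("\"database_retention_secs\":%lld\n", (long long)retention);
    return 0;
}
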
+#ifdef NETDATA_DEV_MODE
+#define ACL_DEV_OPEN_ACCESS WEB_CLIENT_ACL_DASHBOARD
+#else
+#define ACL_DEV_OPEN_ACCESS 0
+#endif
+
static struct api_command {
const char *command;
uint32_t hash;
WEB_CLIENT_ACL acl;
int (*callback)(RRDHOST *host, struct web_client *w, char *url);
} api_commands[] = {
- { "info", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_info },
- { "data", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_data },
- { "chart", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_chart },
- { "charts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_charts },
- { "context", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_context },
- { "contexts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_contexts },
- { "archivedcharts", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_archivedcharts },
+ { "info", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_info },
+ { "data", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_data },
+ { "chart", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_chart },
+ { "charts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_charts },
+ { "context", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_context },
+ { "contexts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_contexts },
+ { "archivedcharts", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_archivedcharts },
// registry checks the ACL by itself, so we allow everything
- { "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry },
+ { "registry", 0, WEB_CLIENT_ACL_NOCHECK, web_client_api_request_v1_registry },
// badges can be fetched with both dashboard and badge permissions
- { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD|WEB_CLIENT_ACL_BADGE, web_client_api_request_v1_badge },
+ { "badge.svg", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_badge },
- { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarms },
- { "alarms_values", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarms_values },
- { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_log },
- { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_variables },
- { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_alarm_count },
- { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_allmetrics },
+ { "alarms", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarms },
+ { "alarms_values", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarms_values },
+ { "alarm_log", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_log },
+ { "alarm_variables", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_variables },
+ { "alarm_count", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_alarm_count },
+ { "allmetrics", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_allmetrics },
#if defined(ENABLE_ML)
- { "anomaly_events", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_anomaly_events },
- { "anomaly_event_info", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_anomaly_event_info },
- { "ml_info", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_ml_info },
+ { "ml_info", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_ml_info },
+ { "ml_models", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_ml_models },
#endif
- { "manage/health", 0, WEB_CLIENT_ACL_MGMT, web_client_api_request_v1_mgmt_health },
- { "aclk", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_aclk_state },
- { "metric_correlations", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_metric_correlations },
- { "weights", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_weights },
+ { "manage/health", 0, WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_mgmt_health },
+ { "aclk", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_aclk_state },
+ { "metric_correlations", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_metric_correlations },
+ { "weights", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_weights },
+
+ { "function", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_function },
+ { "functions", 0, WEB_CLIENT_ACL_ACLK | ACL_DEV_OPEN_ACCESS, web_client_api_request_v1_functions },
- { "dbengine_stats", 0, WEB_CLIENT_ACL_DASHBOARD, web_client_api_request_v1_dbengine_stats },
+ { "dbengine_stats", 0, WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK, web_client_api_request_v1_dbengine_stats },
// terminator
- { NULL, 0, WEB_CLIENT_ACL_NONE, NULL },
+ { NULL, 0, WEB_CLIENT_ACL_NONE, NULL },
};
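
The api_commands[] entries above now OR WEB_CLIENT_ACL_ACLK into most ACLs. The check itself lives in the web client code, not in this diff; the sketch below shows how such a bitmask is typically consulted, using stand-in flag values rather than netdata's actual constants.

#include <stdio.h>

typedef unsigned int ACL;
#define ACL_NONE      0x0u
#define ACL_DASHBOARD 0x1u
#define ACL_ACLK      0x2u
#define ACL_MGMT      0x4u

/* A command is permitted when the caller holds at least one of its ACL bits. */
static int acl_allows(ACL command_acl, ACL client_acl) {
    return (command_acl & client_acl) != 0;
}

int main(void) {
    ACL weights_acl = ACL_DASHBOARD | ACL_ACLK;    /* as in the table above */
    printf("dashboard client: %d\n", acl_allows(weights_acl, ACL_DASHBOARD));
    printf("cloud (ACLK):     %d\n", acl_allows(weights_acl, ACL_ACLK));
    printf("mgmt only:        %d\n", acl_allows(weights_acl, ACL_MGMT));
    return 0;
}
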
inline int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url) {
diff --git a/web/api/web_api_v1.h b/web/api/web_api_v1.h
index 544f1e574..e6682c99c 100644
--- a/web/api/web_api_v1.h
+++ b/web/api/web_api_v1.h
@@ -10,30 +10,31 @@
#include "web/api/queries/weights.h"
#define MAX_CHART_LABELS_FILTER (32)
-extern RRDR_OPTIONS web_client_api_request_v1_data_options(char *o);
-extern void web_client_api_request_v1_data_options_to_string(BUFFER *wb, RRDR_OPTIONS options);
-
-extern uint32_t web_client_api_request_v1_data_format(char *name);
-extern uint32_t web_client_api_request_v1_data_google_format(char *name);
-
-extern int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
-extern int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_archivedcharts(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
-extern int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb);
-extern void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation);
-
-extern void web_client_api_v1_init(void);
-extern void web_client_api_v1_management_init(void);
+RRDR_OPTIONS web_client_api_request_v1_data_options(char *o);
+void web_client_api_request_v1_data_options_to_buffer(BUFFER *wb, RRDR_OPTIONS options);
+void web_client_api_request_v1_data_options_to_string(char *buf, size_t size, RRDR_OPTIONS options);
+
+uint32_t web_client_api_request_v1_data_format(char *name);
+uint32_t web_client_api_request_v1_data_google_format(char *name);
+
+int web_client_api_request_v1_alarms(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_alarms_values(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_single_chart(RRDHOST *host, struct web_client *w, char *url, void callback(RRDSET *st, BUFFER *buf));
+int web_client_api_request_v1_alarm_variables(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_alarm_count(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_charts(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_archivedcharts(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_chart(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_data(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_registry(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_info(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1(RRDHOST *host, struct web_client *w, char *url);
+int web_client_api_request_v1_info_fill_buffer(RRDHOST *host, BUFFER *wb);
+void host_labels2json(RRDHOST *host, BUFFER *wb, size_t indentation);
+
+void web_client_api_v1_init(void);
+void web_client_api_v1_management_init(void);
extern char *api_secret;
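
The header above adds a fixed-size-string variant of the options formatter alongside the BUFFER-based one. A stand-alone sketch of that second shape, writing flag names into a caller-supplied buffer with snprintf(); the flag names and values below are illustrative, not netdata's RRDR_OPTIONS.

#include <stdio.h>
#include <string.h>

typedef unsigned int OPTIONS;
#define OPT_JSONWRAP 0x1u
#define OPT_NONZERO  0x2u

static void options_to_string(char *buf, size_t size, OPTIONS options) {
    if (!size) return;
    size_t used = 0;
    buf[0] = '\0';

    /* append each selected flag name, separated by '|', while room remains */
    if ((options & OPT_JSONWRAP) && used < size)
        used += (size_t)snprintf(buf + used, size - used, "%sjsonwrap", used ? "|" : "");
    if ((options & OPT_NONZERO) && used < size)
        used += (size_t)snprintf(buf + used, size - used, "%snonzero", used ? "|" : "");
}

int main(void) {
    char buf[64];
    options_to_string(buf, sizeof(buf), OPT_JSONWRAP | OPT_NONZERO);
    printf("%s\n", buf);                          /* prints: jsonwrap|nonzero */
    return 0;
}
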