summary refs log tree commit diff stats
path: root/web/api/queries/weights.c
diff options
context:
space:
mode:
Diffstat (limited to 'web/api/queries/weights.c')
-rw-r--r--  web/api/queries/weights.c  1549
1 files changed, 1265 insertions, 284 deletions
diff --git a/web/api/queries/weights.c b/web/api/queries/weights.c
index 485aaca26..0830a969a 100644
--- a/web/api/queries/weights.c
+++ b/web/api/queries/weights.c
@@ -24,10 +24,11 @@ static struct {
const char *name;
WEIGHTS_METHOD value;
} weights_methods[] = {
- { "ks2" , WEIGHTS_METHOD_MC_KS2}
- , { "volume" , WEIGHTS_METHOD_MC_VOLUME}
- , { "anomaly-rate" , WEIGHTS_METHOD_ANOMALY_RATE}
- , { NULL , 0 }
+ { "ks2" , WEIGHTS_METHOD_MC_KS2}
+ , { "volume" , WEIGHTS_METHOD_MC_VOLUME}
+ , { "anomaly-rate" , WEIGHTS_METHOD_ANOMALY_RATE}
+ , { "value" , WEIGHTS_METHOD_VALUE}
+ , { NULL , 0 }
};
WEIGHTS_METHOD weights_string_to_method(const char *method) {
@@ -56,14 +57,18 @@ typedef enum {
struct register_result {
RESULT_FLAGS flags;
+ RRDHOST *host;
RRDCONTEXT_ACQUIRED *rca;
RRDINSTANCE_ACQUIRED *ria;
RRDMETRIC_ACQUIRED *rma;
NETDATA_DOUBLE value;
+ STORAGE_POINT highlighted;
+ STORAGE_POINT baseline;
+ usec_t duration_ut;
};
static DICTIONARY *register_result_init() {
- DICTIONARY *results = dictionary_create(DICT_OPTION_SINGLE_THREADED);
+ DICTIONARY *results = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct register_result));
return results;
}
@@ -71,14 +76,10 @@ static void register_result_destroy(DICTIONARY *results) {
dictionary_destroy(results);
}
-static void register_result(DICTIONARY *results,
- RRDCONTEXT_ACQUIRED *rca,
- RRDINSTANCE_ACQUIRED *ria,
- RRDMETRIC_ACQUIRED *rma,
- NETDATA_DOUBLE value,
- RESULT_FLAGS flags,
- WEIGHTS_STATS *stats,
- bool register_zero) {
+static void register_result(DICTIONARY *results, RRDHOST *host, RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria,
+ RRDMETRIC_ACQUIRED *rma, NETDATA_DOUBLE value, RESULT_FLAGS flags,
+ STORAGE_POINT *highlighted, STORAGE_POINT *baseline, WEIGHTS_STATS *stats,
+ bool register_zero, usec_t duration_ut) {
if(!netdata_double_isnumber(value)) return;
@@ -90,17 +91,25 @@ static void register_result(DICTIONARY *results,
return;
// keep track of the max of the baseline / highlight ratio
- if(flags & RESULT_IS_BASE_HIGH_RATIO && v > stats->max_base_high_ratio)
+ if((flags & RESULT_IS_BASE_HIGH_RATIO) && v > stats->max_base_high_ratio)
stats->max_base_high_ratio = v;
struct register_result t = {
.flags = flags,
+ .host = host,
.rca = rca,
.ria = ria,
.rma = rma,
- .value = v
+ .value = v,
+ .duration_ut = duration_ut,
};
+ if(highlighted)
+ t.highlighted = *highlighted;
+
+ if(baseline)
+ t.baseline = *baseline;
+
// we can use the pointer address or RMA as a unique key for each metric
char buf[20 + 1];
ssize_t len = snprintfz(buf, 20, "%p", rma);
@@ -114,112 +123,92 @@ static void results_header_to_json(DICTIONARY *results __maybe_unused, BUFFER *w
time_t after, time_t before,
time_t baseline_after, time_t baseline_before,
size_t points, WEIGHTS_METHOD method,
- RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions __maybe_unused, usec_t duration,
WEIGHTS_STATS *stats) {
- buffer_sprintf(wb, "{\n"
- "\t\"after\": %lld,\n"
- "\t\"before\": %lld,\n"
- "\t\"duration\": %lld,\n"
- "\t\"points\": %zu,\n",
- (long long)after,
- (long long)before,
- (long long)(before - after),
- points
- );
-
- if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME)
- buffer_sprintf(wb, ""
- "\t\"baseline_after\": %lld,\n"
- "\t\"baseline_before\": %lld,\n"
- "\t\"baseline_duration\": %lld,\n"
- "\t\"baseline_points\": %zu,\n",
- (long long)baseline_after,
- (long long)baseline_before,
- (long long)(baseline_before - baseline_after),
- points << shifts
- );
-
- buffer_sprintf(wb, ""
- "\t\"statistics\": {\n"
- "\t\t\"query_time_ms\": %f,\n"
- "\t\t\"db_queries\": %zu,\n"
- "\t\t\"query_result_points\": %zu,\n"
- "\t\t\"binary_searches\": %zu,\n"
- "\t\t\"db_points_read\": %zu,\n"
- "\t\t\"db_points_per_tier\": [ ",
- (double)duration / (double)USEC_PER_MS,
- stats->db_queries,
- stats->result_points,
- stats->binary_searches,
- stats->db_points
- );
-
- for(size_t tier = 0; tier < storage_tiers ;tier++)
- buffer_sprintf(wb, "%s%zu", tier?", ":"", stats->db_points_per_tier[tier]);
-
- buffer_sprintf(wb, " ]\n"
- "\t},\n"
- "\t\"group\": \"%s\",\n"
- "\t\"method\": \"%s\",\n"
- "\t\"options\": \"",
- web_client_api_request_v1_data_group_to_string(group),
- weights_method_to_string(method)
- );
-
- web_client_api_request_v1_data_options_to_buffer(wb, options);
+ buffer_json_member_add_time_t(wb, "after", after);
+ buffer_json_member_add_time_t(wb, "before", before);
+ buffer_json_member_add_time_t(wb, "duration", before - after);
+ buffer_json_member_add_uint64(wb, "points", points);
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ buffer_json_member_add_time_t(wb, "baseline_after", baseline_after);
+ buffer_json_member_add_time_t(wb, "baseline_before", baseline_before);
+ buffer_json_member_add_time_t(wb, "baseline_duration", baseline_before - baseline_after);
+ buffer_json_member_add_uint64(wb, "baseline_points", points << shifts);
+ }
+
+ buffer_json_member_add_object(wb, "statistics");
+ {
+ buffer_json_member_add_double(wb, "query_time_ms", (double) duration / (double) USEC_PER_MS);
+ buffer_json_member_add_uint64(wb, "db_queries", stats->db_queries);
+ buffer_json_member_add_uint64(wb, "query_result_points", stats->result_points);
+ buffer_json_member_add_uint64(wb, "binary_searches", stats->binary_searches);
+ buffer_json_member_add_uint64(wb, "db_points_read", stats->db_points);
+
+ buffer_json_member_add_array(wb, "db_points_per_tier");
+ {
+ for (size_t tier = 0; tier < storage_tiers; tier++)
+ buffer_json_add_array_item_uint64(wb, stats->db_points_per_tier[tier]);
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_string(wb, "group", time_grouping_tostring(group));
+ buffer_json_member_add_string(wb, "method", weights_method_to_string(method));
+ web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
}
static size_t registered_results_to_json_charts(DICTIONARY *results, BUFFER *wb,
time_t after, time_t before,
time_t baseline_after, time_t baseline_before,
size_t points, WEIGHTS_METHOD method,
- RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions, usec_t duration,
WEIGHTS_STATS *stats) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+
results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
points, method, group, options, shifts, examined_dimensions, duration, stats);
- buffer_strcat(wb, "\",\n\t\"correlated_charts\": {\n");
+ buffer_json_member_add_object(wb, "correlated_charts");
- size_t charts = 0, chart_dims = 0, total_dimensions = 0;
+ size_t charts = 0, total_dimensions = 0;
struct register_result *t;
RRDINSTANCE_ACQUIRED *last_ria = NULL; // never access this - we use it only for comparison
dfe_start_read(results, t) {
if(t->ria != last_ria) {
last_ria = t->ria;
- if(charts) buffer_strcat(wb, "\n\t\t\t}\n\t\t},\n");
- buffer_strcat(wb, "\t\t\"");
- buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
- buffer_strcat(wb, "\": {\n");
- buffer_strcat(wb, "\t\t\t\"context\": \"");
- buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
- buffer_strcat(wb, "\",\n\t\t\t\"dimensions\": {\n");
+ if(charts) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // chart:id
+ }
+
+ buffer_json_member_add_object(wb, rrdinstance_acquired_id(t->ria));
+ buffer_json_member_add_string(wb, "context", rrdcontext_acquired_id(t->rca));
+ buffer_json_member_add_object(wb, "dimensions");
charts++;
- chart_dims = 0;
}
- if (chart_dims) buffer_sprintf(wb, ",\n");
- buffer_sprintf(wb, "\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
- chart_dims++;
+ buffer_json_member_add_double(wb, rrdmetric_acquired_name(t->rma), t->value);
total_dimensions++;
}
dfe_done(t);
// close dimensions and chart
- if (total_dimensions)
- buffer_strcat(wb, "\n\t\t\t}\n\t\t}\n");
-
- // close correlated_charts
- buffer_sprintf(wb, "\t},\n"
- "\t\"correlated_dimensions\": %zu,\n"
- "\t\"total_dimensions_count\": %zu\n"
- "}\n",
- total_dimensions,
- examined_dimensions
- );
+ if (total_dimensions) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_object_close(wb); // chart:id
+ }
+
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+ buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+ buffer_json_finalize(wb);
return total_dimensions;
}
@@ -228,14 +217,16 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
time_t after, time_t before,
time_t baseline_after, time_t baseline_before,
size_t points, WEIGHTS_METHOD method,
- RRDR_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
size_t examined_dimensions, usec_t duration,
WEIGHTS_STATS *stats) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+
results_header_to_json(results, wb, after, before, baseline_after, baseline_before,
points, method, group, options, shifts, examined_dimensions, duration, stats);
- buffer_strcat(wb, "\",\n\t\"contexts\": {\n");
+ buffer_json_member_add_object(wb, "contexts");
size_t contexts = 0, charts = 0, total_dimensions = 0, context_dims = 0, chart_dims = 0;
NETDATA_DOUBLE contexts_total_weight = 0.0, charts_total_weight = 0.0;
@@ -247,18 +238,17 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
if(t->rca != last_rca) {
last_rca = t->rca;
- if(contexts)
- buffer_sprintf(wb, "\n"
- "\t\t\t\t\t},\n"
- "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t\t\t}\n\t\t\t},\n"
- "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n\t\t},\n"
- , charts_total_weight / (double)chart_dims
- , contexts_total_weight / (double)context_dims);
+ if(contexts) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_double(wb, "weight", charts_total_weight / (double) chart_dims);
+ buffer_json_object_close(wb); // chart:id
+ buffer_json_object_close(wb); // charts
+ buffer_json_member_add_double(wb, "weight", contexts_total_weight / (double) context_dims);
+ buffer_json_object_close(wb); // context
+ }
- buffer_strcat(wb, "\t\t\"");
- buffer_strcat(wb, rrdcontext_acquired_id(t->rca));
- buffer_strcat(wb, "\": {\n\t\t\t\"charts\":{\n");
+ buffer_json_member_add_object(wb, rrdcontext_acquired_id(t->rca));
+ buffer_json_member_add_object(wb, "charts");
contexts++;
charts = 0;
@@ -271,25 +261,21 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
if(t->ria != last_ria) {
last_ria = t->ria;
- if(charts)
- buffer_sprintf(wb, "\n"
- "\t\t\t\t\t},\n"
- "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t\t\t},\n"
- , charts_total_weight / (double)chart_dims);
+ if(charts) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_double(wb, "weight", charts_total_weight / (double) chart_dims);
+ buffer_json_object_close(wb); // chart:id
+ }
- buffer_strcat(wb, "\t\t\t\t\"");
- buffer_strcat(wb, rrdinstance_acquired_id(t->ria));
- buffer_strcat(wb, "\": {\n");
- buffer_strcat(wb, "\t\t\t\t\t\"dimensions\": {\n");
+ buffer_json_member_add_object(wb, rrdinstance_acquired_id(t->ria));
+ buffer_json_member_add_object(wb, "dimensions");
charts++;
chart_dims = 0;
charts_total_weight = 0.0;
}
- if (chart_dims) buffer_sprintf(wb, ",\n");
- buffer_sprintf(wb, "\t\t\t\t\t\t\"%s\": " NETDATA_DOUBLE_FORMAT, rrdmetric_acquired_name(t->rma), t->value);
+ buffer_json_member_add_double(wb, rrdmetric_acquired_name(t->rma), t->value);
charts_total_weight += t->value;
contexts_total_weight += t->value;
chart_dims++;
@@ -299,25 +285,794 @@ static size_t registered_results_to_json_contexts(DICTIONARY *results, BUFFER *w
dfe_done(t);
// close dimensions and chart
- if (total_dimensions)
- buffer_sprintf(wb, "\n"
- "\t\t\t\t\t},\n"
- "\t\t\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t\t\t}\n"
- "\t\t\t},\n"
- "\t\t\t\"weight\":" NETDATA_DOUBLE_FORMAT "\n"
- "\t\t}\n"
- , charts_total_weight / (double)chart_dims
- , contexts_total_weight / (double)context_dims);
-
- // close correlated_charts
- buffer_sprintf(wb, "\t},\n"
- "\t\"weighted_dimensions\": %zu,\n"
- "\t\"total_dimensions_count\": %zu\n"
- "}\n",
- total_dimensions,
- examined_dimensions
- );
+ if (total_dimensions) {
+ buffer_json_object_close(wb); // dimensions
+ buffer_json_member_add_double(wb, "weight", charts_total_weight / (double) chart_dims);
+ buffer_json_object_close(wb); // chart:id
+ buffer_json_object_close(wb); // charts
+ buffer_json_member_add_double(wb, "weight", contexts_total_weight / (double) context_dims);
+ buffer_json_object_close(wb); // context
+ }
+
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+ buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+ buffer_json_finalize(wb);
+
+ return total_dimensions;
+}
+
+struct query_weights_data {
+ QUERY_WEIGHTS_REQUEST *qwr;
+
+ SIMPLE_PATTERN *scope_nodes_sp;
+ SIMPLE_PATTERN *scope_contexts_sp;
+ SIMPLE_PATTERN *nodes_sp;
+ SIMPLE_PATTERN *contexts_sp;
+ SIMPLE_PATTERN *instances_sp;
+ SIMPLE_PATTERN *dimensions_sp;
+ SIMPLE_PATTERN *labels_sp;
+ SIMPLE_PATTERN *alerts_sp;
+
+ usec_t timeout_us;
+ bool timed_out;
+ bool interrupted;
+
+ struct query_timings timings;
+
+ size_t examined_dimensions;
+ bool register_zero;
+
+ DICTIONARY *results;
+ WEIGHTS_STATS stats;
+
+ uint32_t shifts;
+
+ struct query_versions versions;
+};
+
+#define AGGREGATED_WEIGHT_EMPTY (struct aggregated_weight) { \
+ .min = NAN, \
+ .max = NAN, \
+ .sum = NAN, \
+ .count = 0, \
+ .hsp = STORAGE_POINT_UNSET, \
+ .bsp = STORAGE_POINT_UNSET, \
+}
+
+#define merge_into_aw(aw, t) do { \
+ if(!(aw).count) { \
+ (aw).count = 1; \
+ (aw).min = (aw).max = (aw).sum = (t)->value; \
+ (aw).hsp = (t)->highlighted; \
+ if(baseline) \
+ (aw).bsp = (t)->baseline; \
+ } \
+ else { \
+ (aw).count++; \
+ (aw).sum += (t)->value; \
+ if((t)->value < (aw).min) \
+ (aw).min = (t)->value; \
+ if((t)->value > (aw).max) \
+ (aw).max = (t)->value; \
+ storage_point_merge_to((aw).hsp, (t)->highlighted); \
+ if(baseline) \
+ storage_point_merge_to((aw).bsp, (t)->baseline); \
+ } \
+} while(0)
+
+static void results_header_to_json_v2(DICTIONARY *results __maybe_unused, BUFFER *wb, struct query_weights_data *qwd,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions __maybe_unused, usec_t duration __maybe_unused,
+ WEIGHTS_STATS *stats, bool group_by) {
+
+ buffer_json_member_add_object(wb, "request");
+ buffer_json_member_add_string(wb, "method", weights_method_to_string(method));
+ web_client_api_request_v1_data_options_to_buffer_json_array(wb, "options", options);
+
+ buffer_json_member_add_object(wb, "scope");
+ buffer_json_member_add_string(wb, "scope_nodes", qwd->qwr->scope_nodes ? qwd->qwr->scope_nodes : "*");
+ buffer_json_member_add_string(wb, "scope_contexts", qwd->qwr->scope_contexts ? qwd->qwr->scope_contexts : "*");
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "selectors");
+ buffer_json_member_add_string(wb, "nodes", qwd->qwr->nodes ? qwd->qwr->nodes : "*");
+ buffer_json_member_add_string(wb, "contexts", qwd->qwr->contexts ? qwd->qwr->contexts : "*");
+ buffer_json_member_add_string(wb, "instances", qwd->qwr->instances ? qwd->qwr->instances : "*");
+ buffer_json_member_add_string(wb, "dimensions", qwd->qwr->dimensions ? qwd->qwr->dimensions : "*");
+ buffer_json_member_add_string(wb, "labels", qwd->qwr->labels ? qwd->qwr->labels : "*");
+ buffer_json_member_add_string(wb, "alerts", qwd->qwr->alerts ? qwd->qwr->alerts : "*");
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_object(wb, "window");
+ buffer_json_member_add_time_t(wb, "after", qwd->qwr->after);
+ buffer_json_member_add_time_t(wb, "before", qwd->qwr->before);
+ buffer_json_member_add_uint64(wb, "points", qwd->qwr->points);
+ if(qwd->qwr->options & RRDR_OPTION_SELECTED_TIER)
+ buffer_json_member_add_uint64(wb, "tier", qwd->qwr->tier);
+ else
+ buffer_json_member_add_string(wb, "tier", NULL);
+ buffer_json_object_close(wb);
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ buffer_json_member_add_object(wb, "baseline");
+ buffer_json_member_add_time_t(wb, "baseline_after", qwd->qwr->baseline_after);
+ buffer_json_member_add_time_t(wb, "baseline_before", qwd->qwr->baseline_before);
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_member_add_object(wb, "aggregations");
+ buffer_json_member_add_object(wb, "time");
+ buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(qwd->qwr->time_group_method));
+ buffer_json_member_add_string(wb, "time_group_options", qwd->qwr->time_group_options);
+ buffer_json_object_close(wb); // time
+
+ buffer_json_member_add_array(wb, "metrics");
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_array(wb, "group_by");
+ buffer_json_group_by_to_array(wb, qwd->qwr->group_by.group_by);
+ buffer_json_array_close(wb);
+
+// buffer_json_member_add_array(wb, "group_by_label");
+// buffer_json_array_close(wb);
+
+ buffer_json_member_add_string(wb, "aggregation", group_by_aggregate_function_to_string(qwd->qwr->group_by.aggregation));
+ }
+ buffer_json_object_close(wb); // 1st group by
+ buffer_json_array_close(wb); // array
+ buffer_json_object_close(wb); // aggregations
+
+ buffer_json_member_add_uint64(wb, "timeout", qwd->qwr->timeout_ms);
+ buffer_json_object_close(wb); // request
+
+ buffer_json_member_add_object(wb, "view");
+ buffer_json_member_add_string(wb, "format", (group_by)?"grouped":"full");
+ buffer_json_member_add_string(wb, "time_group", time_grouping_tostring(group));
+
+ buffer_json_member_add_object(wb, "window");
+ buffer_json_member_add_time_t(wb, "after", after);
+ buffer_json_member_add_time_t(wb, "before", before);
+ buffer_json_member_add_time_t(wb, "duration", before - after);
+ buffer_json_member_add_uint64(wb, "points", points);
+ buffer_json_object_close(wb);
+
+ if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
+ buffer_json_member_add_object(wb, "baseline");
+ buffer_json_member_add_time_t(wb, "after", baseline_after);
+ buffer_json_member_add_time_t(wb, "before", baseline_before);
+ buffer_json_member_add_time_t(wb, "duration", baseline_before - baseline_after);
+ buffer_json_member_add_uint64(wb, "points", points << shifts);
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_object_close(wb); // view
+
+ buffer_json_member_add_object(wb, "db");
+ {
+ buffer_json_member_add_uint64(wb, "db_queries", stats->db_queries);
+ buffer_json_member_add_uint64(wb, "query_result_points", stats->result_points);
+ buffer_json_member_add_uint64(wb, "binary_searches", stats->binary_searches);
+ buffer_json_member_add_uint64(wb, "db_points_read", stats->db_points);
+
+ buffer_json_member_add_array(wb, "db_points_per_tier");
+ {
+ for (size_t tier = 0; tier < storage_tiers; tier++)
+ buffer_json_add_array_item_uint64(wb, stats->db_points_per_tier[tier]);
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb); // db
+}
+
+typedef enum {
+ WPT_DIMENSION = 0,
+ WPT_INSTANCE = 1,
+ WPT_CONTEXT = 2,
+ WPT_NODE = 3,
+ WPT_GROUP = 4,
+} WEIGHTS_POINT_TYPE;
+
+struct aggregated_weight {
+ const char *name;
+ NETDATA_DOUBLE min;
+ NETDATA_DOUBLE max;
+ NETDATA_DOUBLE sum;
+ size_t count;
+ STORAGE_POINT hsp;
+ STORAGE_POINT bsp;
+};
+
+static inline void storage_point_to_json(BUFFER *wb, WEIGHTS_POINT_TYPE type, ssize_t di, ssize_t ii, ssize_t ci, ssize_t ni, struct aggregated_weight *aw, RRDR_OPTIONS options __maybe_unused, bool baseline) {
+ if(type != WPT_GROUP) {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_uint64(wb, type); // "type"
+ buffer_json_add_array_item_int64(wb, ni);
+ if (type != WPT_NODE) {
+ buffer_json_add_array_item_int64(wb, ci);
+ if (type != WPT_CONTEXT) {
+ buffer_json_add_array_item_int64(wb, ii);
+ if (type != WPT_INSTANCE)
+ buffer_json_add_array_item_int64(wb, di);
+ else
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+ else {
+ buffer_json_add_array_item_string(wb, NULL);
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+ }
+ else {
+ buffer_json_add_array_item_string(wb, NULL);
+ buffer_json_add_array_item_string(wb, NULL);
+ buffer_json_add_array_item_string(wb, NULL);
+ }
+ buffer_json_add_array_item_double(wb, (aw->count) ? aw->sum / (NETDATA_DOUBLE)aw->count : 0.0); // "weight"
+ }
+ else {
+ buffer_json_member_add_array(wb, "v");
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_double(wb, aw->min); // "min"
+ buffer_json_add_array_item_double(wb, (aw->count) ? aw->sum / (NETDATA_DOUBLE)aw->count : 0.0); // "avg"
+ buffer_json_add_array_item_double(wb, aw->max); // "max"
+ buffer_json_add_array_item_double(wb, aw->sum); // "sum"
+ buffer_json_add_array_item_uint64(wb, aw->count); // "count"
+ buffer_json_array_close(wb);
+ }
+
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_double(wb, aw->hsp.min); // "min"
+ buffer_json_add_array_item_double(wb, (aw->hsp.count) ? aw->hsp.sum / (NETDATA_DOUBLE) aw->hsp.count : 0.0); // "avg"
+ buffer_json_add_array_item_double(wb, aw->hsp.max); // "max"
+ buffer_json_add_array_item_double(wb, aw->hsp.sum); // "sum"
+ buffer_json_add_array_item_uint64(wb, aw->hsp.count); // "count"
+ buffer_json_add_array_item_uint64(wb, aw->hsp.anomaly_count); // "anomaly_count"
+ buffer_json_array_close(wb);
+
+ if(baseline) {
+ buffer_json_add_array_item_array(wb);
+ buffer_json_add_array_item_double(wb, aw->bsp.min); // "min"
+ buffer_json_add_array_item_double(wb, (aw->bsp.count) ? aw->bsp.sum / (NETDATA_DOUBLE) aw->bsp.count : 0.0); // "avg"
+ buffer_json_add_array_item_double(wb, aw->bsp.max); // "max"
+ buffer_json_add_array_item_double(wb, aw->bsp.sum); // "sum"
+ buffer_json_add_array_item_uint64(wb, aw->bsp.count); // "count"
+ buffer_json_add_array_item_uint64(wb, aw->bsp.anomaly_count); // "anomaly_count"
+ buffer_json_array_close(wb);
+ }
+
+ buffer_json_array_close(wb);
+}
+
+static void multinode_data_schema(BUFFER *wb, RRDR_OPTIONS options __maybe_unused, const char *key, bool baseline, bool group_by) {
+ buffer_json_member_add_object(wb, key); // schema
+
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "items");
+
+ if(group_by) {
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "weight");
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "labels");
+ {
+ buffer_json_add_array_item_string(wb, "min");
+ buffer_json_add_array_item_string(wb, "avg");
+ buffer_json_add_array_item_string(wb, "max");
+ buffer_json_add_array_item_string(wb, "sum");
+ buffer_json_add_array_item_string(wb, "count");
+ }
+ buffer_json_array_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+ else {
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "name", "row_type");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_array(wb, "value");
+ buffer_json_add_array_item_string(wb, "dimension");
+ buffer_json_add_array_item_string(wb, "instance");
+ buffer_json_add_array_item_string(wb, "context");
+ buffer_json_add_array_item_string(wb, "node");
+ buffer_json_array_close(wb);
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "ni");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "nodes");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "ci");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "contexts");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "ii");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "instances");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "di");
+ buffer_json_member_add_string(wb, "type", "integer");
+ buffer_json_member_add_string(wb, "dictionary", "dimensions");
+ }
+ buffer_json_object_close(wb);
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "weight");
+ buffer_json_member_add_string(wb, "type", "number");
+ }
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "timeframe");
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "labels");
+ {
+ buffer_json_add_array_item_string(wb, "min");
+ buffer_json_add_array_item_string(wb, "avg");
+ buffer_json_add_array_item_string(wb, "max");
+ buffer_json_add_array_item_string(wb, "sum");
+ buffer_json_add_array_item_string(wb, "count");
+ buffer_json_add_array_item_string(wb, "anomaly_count");
+ }
+ buffer_json_array_close(wb);
+ buffer_json_member_add_object(wb, "calculations");
+ buffer_json_member_add_string(wb, "anomaly rate", "anomaly_count * 100 / count");
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb);
+
+ if(baseline) {
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "name", "baseline timeframe");
+ buffer_json_member_add_string(wb, "type", "array");
+ buffer_json_member_add_array(wb, "labels");
+ {
+ buffer_json_add_array_item_string(wb, "min");
+ buffer_json_add_array_item_string(wb, "avg");
+ buffer_json_add_array_item_string(wb, "max");
+ buffer_json_add_array_item_string(wb, "sum");
+ buffer_json_add_array_item_string(wb, "count");
+ buffer_json_add_array_item_string(wb, "anomaly_count");
+ }
+ buffer_json_array_close(wb);
+ buffer_json_member_add_object(wb, "calculations");
+ buffer_json_member_add_string(wb, "anomaly rate", "anomaly_count * 100 / count");
+ buffer_json_object_close(wb);
+ }
+ buffer_json_object_close(wb);
+ }
+
+ buffer_json_array_close(wb); // items
+ buffer_json_object_close(wb); // schema
+}
+
+struct dict_unique_node {
+ bool existing;
+ bool exposed;
+ uint32_t i;
+ RRDHOST *host;
+ usec_t duration_ut;
+};
+
+struct dict_unique_name_units {
+ bool existing;
+ bool exposed;
+ uint32_t i;
+ const char *units;
+};
+
+struct dict_unique_id_name {
+ bool existing;
+ bool exposed;
+ uint32_t i;
+ const char *id;
+ const char *name;
+};
+
+static inline struct dict_unique_node *dict_unique_node_add(DICTIONARY *dict, RRDHOST *host, ssize_t *max_id) {
+ struct dict_unique_node *dun = dictionary_set(dict, host->machine_guid, NULL, sizeof(struct dict_unique_node));
+ if(!dun->existing) {
+ dun->existing = true;
+ dun->host = host;
+ dun->i = *max_id;
+ (*max_id)++;
+ }
+
+ return dun;
+}
+
+static inline struct dict_unique_name_units *dict_unique_name_units_add(DICTIONARY *dict, const char *name, const char *units, ssize_t *max_id) {
+ struct dict_unique_name_units *dun = dictionary_set(dict, name, NULL, sizeof(struct dict_unique_name_units));
+ if(!dun->existing) {
+ dun->units = units;
+ dun->existing = true;
+ dun->i = *max_id;
+ (*max_id)++;
+ }
+
+ return dun;
+}
+
+static inline struct dict_unique_id_name *dict_unique_id_name_add(DICTIONARY *dict, const char *id, const char *name, ssize_t *max_id) {
+ char key[1024 + 1];
+ snprintfz(key, 1024, "%s:%s", id, name);
+ struct dict_unique_id_name *dun = dictionary_set(dict, key, NULL, sizeof(struct dict_unique_id_name));
+ if(!dun->existing) {
+ dun->existing = true;
+ dun->i = *max_id;
+ (*max_id)++;
+ dun->id = id;
+ dun->name = name;
+ }
+
+ return dun;
+}
+
+static size_t registered_results_to_json_multinode_no_group_by(
+ DICTIONARY *results, BUFFER *wb,
+ time_t after, time_t before,
+ time_t baseline_after, time_t baseline_before,
+ size_t points, WEIGHTS_METHOD method,
+ RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+ size_t examined_dimensions, struct query_weights_data *qwd,
+ WEIGHTS_STATS *stats,
+ struct query_versions *versions) {
+ buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+ buffer_json_member_add_uint64(wb, "api", 2);
+
+ results_header_to_json_v2(results, wb, qwd, after, before, baseline_after, baseline_before,
+ points, method, group, options, shifts, examined_dimensions,
+ qwd->timings.executed_ut - qwd->timings.received_ut, stats, false);
+
+ version_hashes_api_v2(wb, versions);
+
+ bool baseline = method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME;
+ multinode_data_schema(wb, options, "schema", baseline, false);
+
+ DICTIONARY *dict_nodes = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_node));
+ DICTIONARY *dict_contexts = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_name_units));
+ DICTIONARY *dict_instances = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_id_name));
+ DICTIONARY *dict_dimensions = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct dict_unique_id_name));
+
+ buffer_json_member_add_array(wb, "result");
+
+ struct aggregated_weight node_aw = AGGREGATED_WEIGHT_EMPTY, context_aw = AGGREGATED_WEIGHT_EMPTY, instance_aw = AGGREGATED_WEIGHT_EMPTY;
+ struct register_result *t;
+ RRDHOST *last_host = NULL;
+ RRDCONTEXT_ACQUIRED *last_rca = NULL;
+ RRDINSTANCE_ACQUIRED *last_ria = NULL;
+ struct dict_unique_name_units *context_dun = NULL;
+ struct dict_unique_node *node_dun = NULL;
+ struct dict_unique_id_name *instance_dun = NULL;
+ struct dict_unique_id_name *dimension_dun = NULL;
+ ssize_t di = -1, ii = -1, ci = -1, ni = -1;
+ ssize_t di_max = 0, ii_max = 0, ci_max = 0, ni_max = 0;
+ size_t total_dimensions = 0;
+ dfe_start_read(results, t) {
+
+ // close instance
+ if(t->ria != last_ria && last_ria) {
+ storage_point_to_json(wb, WPT_INSTANCE, di, ii, ci, ni, &instance_aw, options, baseline);
+ instance_dun->exposed = true;
+ last_ria = NULL;
+ instance_aw = AGGREGATED_WEIGHT_EMPTY;
+ }
+
+ // close context
+ if(t->rca != last_rca && last_rca) {
+ storage_point_to_json(wb, WPT_CONTEXT, di, ii, ci, ni, &context_aw, options, baseline);
+ context_dun->exposed = true;
+ last_rca = NULL;
+ context_aw = AGGREGATED_WEIGHT_EMPTY;
+ }
+
+ // close node
+ if(t->host != last_host && last_host) {
+ storage_point_to_json(wb, WPT_NODE, di, ii, ci, ni, &node_aw, options, baseline);
+ node_dun->exposed = true;
+ last_host = NULL;
+ node_aw = AGGREGATED_WEIGHT_EMPTY;
+ }
+
+ // open node
+ if(t->host != last_host) {
+ last_host = t->host;
+ node_dun = dict_unique_node_add(dict_nodes, t->host, &ni_max);
+ ni = node_dun->i;
+ }
+
+ // open context
+ if(t->rca != last_rca) {
+ last_rca = t->rca;
+ context_dun = dict_unique_name_units_add(dict_contexts, rrdcontext_acquired_id(t->rca),
+ rrdcontext_acquired_units(t->rca), &ci_max);
+ ci = context_dun->i;
+ }
+
+ // open instance
+ if(t->ria != last_ria) {
+ last_ria = t->ria;
+ instance_dun = dict_unique_id_name_add(dict_instances, rrdinstance_acquired_id(t->ria), rrdinstance_acquired_name(t->ria), &ii_max);
+ ii = instance_dun->i;
+ }
+
+ dimension_dun = dict_unique_id_name_add(dict_dimensions, rrdmetric_acquired_id(t->rma), rrdmetric_acquired_name(t->rma), &di_max);
+ di = dimension_dun->i;
+
+ struct aggregated_weight aw = {
+ .min = t->value,
+ .max = t->value,
+ .sum = t->value,
+ .count = 1,
+ .hsp = t->highlighted,
+ .bsp = t->baseline,
+ };
+
+ storage_point_to_json(wb, WPT_DIMENSION, di, ii, ci, ni, &aw, options, baseline);
+ node_dun->exposed = true;
+ context_dun->exposed = true;
+ instance_dun->exposed = true;
+ dimension_dun->exposed = true;
+
+ merge_into_aw(instance_aw, t);
+ merge_into_aw(context_aw, t);
+ merge_into_aw(node_aw, t);
+
+ node_dun->duration_ut += t->duration_ut;
+ total_dimensions++;
+ }
+ dfe_done(t);
+
+ // close instance
+ if(last_ria) {
+ storage_point_to_json(wb, WPT_INSTANCE, di, ii, ci, ni, &instance_aw, options, baseline);
+ instance_dun->exposed = true;
+ }
+
+ // close context
+ if(last_rca) {
+ storage_point_to_json(wb, WPT_CONTEXT, di, ii, ci, ni, &context_aw, options, baseline);
+ context_dun->exposed = true;
+ }
+
+ // close node
+ if(last_host) {
+ storage_point_to_json(wb, WPT_NODE, di, ii, ci, ni, &node_aw, options, baseline);
+ node_dun->exposed = true;
+ }
+
+ buffer_json_array_close(wb); // points
+
+ buffer_json_member_add_object(wb, "dictionaries");
+ buffer_json_member_add_array(wb, "nodes");
+ {
+ struct dict_unique_node *dun;
+ dfe_start_read(dict_nodes, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_node_add_v2(wb, dun->host, dun->i, dun->duration_ut);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "contexts");
+ {
+ struct dict_unique_name_units *dun;
+ dfe_start_read(dict_contexts, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", dun_dfe.name);
+ buffer_json_member_add_string(wb, "units", dun->units);
+ buffer_json_member_add_int64(wb, "ci", dun->i);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "instances");
+ {
+ struct dict_unique_id_name *dun;
+ dfe_start_read(dict_instances, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", dun->id);
+ if(dun->id != dun->name)
+ buffer_json_member_add_string(wb, "nm", dun->name);
+ buffer_json_member_add_int64(wb, "ii", dun->i);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_array(wb, "dimensions");
+ {
+ struct dict_unique_id_name *dun;
+ dfe_start_read(dict_dimensions, dun) {
+ if(!dun->exposed)
+ continue;
+
+ buffer_json_add_array_item_object(wb);
+ buffer_json_member_add_string(wb, "id", dun->id);
+ if(dun->id != dun->name)
+ buffer_json_member_add_string(wb, "nm", dun->name);
+ buffer_json_member_add_int64(wb, "di", dun->i);
+ buffer_json_object_close(wb);
+ }
+ dfe_done(dun);
+ }
+ buffer_json_array_close(wb);
+
+ buffer_json_object_close(wb); //dictionaries
+
+ buffer_json_agents_array_v2(wb, &qwd->timings, 0);
+ buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+ buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+ buffer_json_finalize(wb);
+
+ dictionary_destroy(dict_nodes);
+ dictionary_destroy(dict_contexts);
+ dictionary_destroy(dict_instances);
+ dictionary_destroy(dict_dimensions);
+
+ return total_dimensions;
+}
+
+// Render the v2 weights response when a group-by is requested.
+//
+// Pass 1: every per-dimension result in `results` is folded into an
+// `aggregated_weight` bucket, keyed by the concatenation of the requested
+// group-by fields (dimension, instance, node, context, units).
+// Pass 2: the buckets are emitted into the JSON "result" array.
+//
+// Returns the number of dimension results that contributed to the output.
+static size_t registered_results_to_json_multinode_group_by(
+        DICTIONARY *results, BUFFER *wb,
+        time_t after, time_t before,
+        time_t baseline_after, time_t baseline_before,
+        size_t points, WEIGHTS_METHOD method,
+        RRDR_TIME_GROUPING group, RRDR_OPTIONS options, uint32_t shifts,
+        size_t examined_dimensions, struct query_weights_data *qwd,
+        WEIGHTS_STATS *stats,
+        struct query_versions *versions) {
+    buffer_json_initialize(wb, "\"", "\"", 0, true, options & RRDR_OPTION_MINIFY);
+    buffer_json_member_add_uint64(wb, "api", 2);
+
+    results_header_to_json_v2(results, wb, qwd, after, before, baseline_after, baseline_before,
+                              points, method, group, options, shifts, examined_dimensions,
+                              qwd->timings.executed_ut - qwd->timings.received_ut, stats, true);
+
+    version_hashes_api_v2(wb, versions);
+
+    // baseline points are only meaningful for the two metric-correlations methods
+    bool baseline = method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME;
+    multinode_data_schema(wb, options, "v_schema", baseline, true);
+
+    // DONT_OVERWRITE_VALUE: dictionary_set() keeps the existing (zero-initialized)
+    // value for an already-known key, so aw->name == NULL identifies a freshly
+    // created bucket in the loop below
+    DICTIONARY *group_by = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
+                                                      NULL, sizeof(struct aggregated_weight));
+
+    struct register_result *t;
+    size_t total_dimensions = 0;
+    BUFFER *key = buffer_create(0, NULL);
+    BUFFER *name = buffer_create(0, NULL);
+    dfe_start_read(results, t) {
+
+        // build the group-by key (machine readable, from ids/guids) and its
+        // display name (human readable) by joining the selected fields with commas
+        buffer_flush(key);
+        buffer_flush(name);
+
+        if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_DIMENSION) {
+            buffer_strcat(key, rrdmetric_acquired_name(t->rma));
+            buffer_strcat(name, rrdmetric_acquired_name(t->rma));
+        }
+        if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_INSTANCE) {
+            if(buffer_strlen(key)) {
+                buffer_fast_strcat(key, ",", 1);
+                buffer_fast_strcat(name, ",", 1);
+            }
+
+            buffer_strcat(key, rrdinstance_acquired_id(t->ria));
+            buffer_strcat(name, rrdinstance_acquired_name(t->ria));
+
+            // instance ids are not unique across nodes - qualify them with the
+            // host, unless the node is already part of the key anyway
+            if(!(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_NODE)) {
+                buffer_fast_strcat(key, "@", 1);
+                buffer_fast_strcat(name, "@", 1);
+                buffer_strcat(key, t->host->machine_guid);
+                buffer_strcat(name, rrdhost_hostname(t->host));
+            }
+        }
+        if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_NODE) {
+            if(buffer_strlen(key)) {
+                buffer_fast_strcat(key, ",", 1);
+                buffer_fast_strcat(name, ",", 1);
+            }
+
+            buffer_strcat(key, t->host->machine_guid);
+            buffer_strcat(name, rrdhost_hostname(t->host));
+        }
+        if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_CONTEXT) {
+            if(buffer_strlen(key)) {
+                buffer_fast_strcat(key, ",", 1);
+                buffer_fast_strcat(name, ",", 1);
+            }
+
+            buffer_strcat(key, rrdcontext_acquired_id(t->rca));
+            buffer_strcat(name, rrdcontext_acquired_id(t->rca));
+        }
+        if(qwd->qwr->group_by.group_by & RRDR_GROUP_BY_UNITS) {
+            if(buffer_strlen(key)) {
+                buffer_fast_strcat(key, ",", 1);
+                buffer_fast_strcat(name, ",", 1);
+            }
+
+            buffer_strcat(key, rrdcontext_acquired_units(t->rca));
+            buffer_strcat(name, rrdcontext_acquired_units(t->rca));
+        }
+
+        // the first result seen for a key initializes its bucket,
+        // subsequent results are merged into it
+        struct aggregated_weight *aw = dictionary_set(group_by, buffer_tostring(key), NULL, sizeof(struct aggregated_weight));
+        if(!aw->name) {
+            aw->name = strdupz(buffer_tostring(name));
+            aw->min = aw->max = aw->sum = t->value;
+            aw->count = 1;
+            aw->hsp = t->highlighted;
+            aw->bsp = t->baseline;
+        }
+        else
+            merge_into_aw(*aw, t);
+
+        total_dimensions++;
+    }
+    dfe_done(t);
+    buffer_free(key); key = NULL;
+    buffer_free(name); name = NULL;
+
+    // emit one object per bucket; "nm" is written only when it differs from "id"
+    struct aggregated_weight *aw;
+    buffer_json_member_add_array(wb, "result");
+    dfe_start_read(group_by, aw) {
+        const char *k = aw_dfe.name;
+        const char *n = aw->name;
+
+        buffer_json_add_array_item_object(wb);
+        buffer_json_member_add_string(wb, "id", k);
+
+        if(strcmp(k, n) != 0)
+            buffer_json_member_add_string(wb, "nm", n);
+
+        storage_point_to_json(wb, WPT_GROUP, 0, 0, 0, 0, aw, options, baseline);
+        buffer_json_object_close(wb);
+
+        // the strdupz()'ed name is owned by us, not by the dictionary - release it here
+        freez((void *)aw->name);
+    }
+    dfe_done(aw);
+    buffer_json_array_close(wb); // result
+
+    buffer_json_agents_array_v2(wb, &qwd->timings, 0);
+    buffer_json_member_add_uint64(wb, "correlated_dimensions", total_dimensions);
+    buffer_json_member_add_uint64(wb, "total_dimensions_count", examined_dimensions);
+    buffer_json_finalize(wb);
+
+    dictionary_destroy(group_by);
     return total_dimensions;
 }
@@ -500,14 +1255,16 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
ONEWAYALLOC *owa, RRDHOST *host,
RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
time_t after, time_t before, size_t points, RRDR_OPTIONS options,
- RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ RRDR_TIME_GROUPING time_group_method, const char *time_group_options, size_t tier,
WEIGHTS_STATS *stats,
- size_t *entries
+ size_t *entries,
+ STORAGE_POINT *sp
) {
NETDATA_DOUBLE *ret = NULL;
QUERY_TARGET_REQUEST qtr = {
+ .version = 1,
.host = host,
.rca = rca,
.ria = ria,
@@ -516,25 +1273,27 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
.before = before,
.points = points,
.options = options,
- .group_method = group_method,
- .group_options = group_options,
+ .time_group_method = time_group_method,
+ .time_group_options = time_group_options,
.tier = tier,
.query_source = QUERY_SOURCE_API_WEIGHTS,
- .priority = STORAGE_PRIORITY_NORMAL,
+ .priority = STORAGE_PRIORITY_SYNCHRONOUS,
};
- RRDR *r = rrd2rrdr(owa, query_target_create(&qtr));
+ QUERY_TARGET *qt = query_target_create(&qtr);
+ RRDR *r = rrd2rrdr(owa, qt);
if(!r)
goto cleanup;
stats->db_queries++;
- stats->result_points += r->internal.result_points_generated;
- stats->db_points += r->internal.db_points_read;
+ stats->result_points += r->stats.result_points_generated;
+ stats->db_points += r->stats.db_points_read;
for(size_t tr = 0; tr < storage_tiers ; tr++)
- stats->db_points_per_tier[tr] += r->internal.tier_points_read[tr];
+ stats->db_points_per_tier[tr] += r->internal.qt->db.tiers[tr].points;
- if(r->d != 1) {
- error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu", r->internal.qt->id, r->d);
+ if(r->d != 1 || r->internal.qt->query.used != 1) {
+ error("WEIGHTS: on query '%s' expected 1 dimension in RRDR but got %zu r->d and %zu qt->query.used",
+ r->internal.qt->id, r->d, (size_t)r->internal.qt->query.used);
goto cleanup;
}
@@ -553,6 +1312,9 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
*entries = rrdr_rows(r);
ret = onewayalloc_mallocz(owa, sizeof(NETDATA_DOUBLE) * rrdr_rows(r));
+ if(sp)
+ *sp = r->internal.qt->query.array[0].query_points;
+
// copy the points of the dimension to a contiguous array
// there is no need to check for empty values, since empty values are already zero
// https://github.com/netdata/netdata/blob/6e3144683a73a2024d51425b20ecfd569034c858/web/api/queries/average/average.c#L41-L43
@@ -560,6 +1322,7 @@ NETDATA_DOUBLE *rrd2rrdr_ks2(
cleanup:
rrdr_free(owa, r);
+ query_target_release(qt);
return ret;
}
@@ -570,27 +1333,30 @@ static void rrdset_metric_correlations_ks2(
time_t baseline_after, time_t baseline_before,
time_t after, time_t before,
size_t points, RRDR_OPTIONS options,
- RRDR_GROUPING group_method, const char *group_options, size_t tier,
+ RRDR_TIME_GROUPING time_group_method, const char *time_group_options, size_t tier,
uint32_t shifts,
WEIGHTS_STATS *stats, bool register_zero
) {
options |= RRDR_OPTION_NATURAL_POINTS;
+ usec_t started_ut = now_monotonic_usec();
ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
size_t high_points = 0;
+ STORAGE_POINT highlighted_sp;
NETDATA_DOUBLE *highlight = rrd2rrdr_ks2(
owa, host, rca, ria, rma, after, before, points,
- options, group_method, group_options, tier, stats, &high_points);
+ options, time_group_method, time_group_options, tier, stats, &high_points, &highlighted_sp);
if(!highlight)
goto cleanup;
size_t base_points = 0;
+ STORAGE_POINT baseline_sp;
NETDATA_DOUBLE *baseline = rrd2rrdr_ks2(
owa, host, rca, ria, rma, baseline_after, baseline_before, high_points << shifts,
- options, group_method, group_options, tier, stats, &base_points);
+ options, time_group_method, time_group_options, tier, stats, &base_points, &baseline_sp);
if(!baseline)
goto cleanup;
@@ -610,9 +1376,12 @@ static void rrdset_metric_correlations_ks2(
prob = 1.0;
}
+ usec_t ended_ut = now_monotonic_usec();
+
// to spread the results evenly, 0.0 needs to be the less correlated and 1.0 the most correlated
// so, we flip the result of kstwo()
- register_result(results, rca, ria, rma, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, stats, register_zero);
+ register_result(results, host, rca, ria, rma, 1.0 - prob, RESULT_IS_BASE_HIGH_RATIO, &highlighted_sp,
+ &baseline_sp, stats, register_zero, ended_ut - started_ut);
}
cleanup:
@@ -622,8 +1391,8 @@ cleanup:
// ----------------------------------------------------------------------------
// VOLUME algorithm functions
-static void merge_query_value_to_stats(QUERY_VALUE *qv, WEIGHTS_STATS *stats) {
- stats->db_queries++;
+static void merge_query_value_to_stats(QUERY_VALUE *qv, WEIGHTS_STATS *stats, size_t queries) {
+ stats->db_queries += queries;
stats->result_points += qv->result_points;
stats->db_points += qv->points_read;
for(size_t tier = 0; tier < storage_tiers ; tier++)
@@ -636,16 +1405,16 @@ static void rrdset_metric_correlations_volume(
DICTIONARY *results,
time_t baseline_after, time_t baseline_before,
time_t after, time_t before,
- RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ RRDR_OPTIONS options, RRDR_TIME_GROUPING time_group_method, const char *time_group_options,
size_t tier,
WEIGHTS_STATS *stats, bool register_zero) {
options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ABSOLUTE | RRDR_OPTION_NATURAL_POINTS;
QUERY_VALUE baseline_average = rrdmetric2value(host, rca, ria, rma, baseline_after, baseline_before,
- options, group_method, group_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
- merge_query_value_to_stats(&baseline_average, stats);
+ options, time_group_method, time_group_options, tier, 0,
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
+ merge_query_value_to_stats(&baseline_average, stats, 1);
if(!netdata_double_isnumber(baseline_average.value)) {
// this means no data for the baseline window, but we may have data for the highlighted one - assume zero
@@ -653,9 +1422,9 @@ static void rrdset_metric_correlations_volume(
}
QUERY_VALUE highlight_average = rrdmetric2value(host, rca, ria, rma, after, before,
- options, group_method, group_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
- merge_query_value_to_stats(&highlight_average, stats);
+ options, time_group_method, time_group_options, tier, 0,
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
+ merge_query_value_to_stats(&highlight_average, stats, 1);
if(!netdata_double_isnumber(highlight_average.value))
return;
@@ -665,12 +1434,17 @@ static void rrdset_metric_correlations_volume(
return;
}
+ if((options & RRDR_OPTION_ANOMALY_BIT) && highlight_average.value < baseline_average.value) {
+ // when working on anomaly bits, we are looking for an increase in the anomaly rate
+ return;
+ }
+
char highlight_countif_options[50 + 1];
snprintfz(highlight_countif_options, 50, "%s" NETDATA_DOUBLE_FORMAT, highlight_average.value < baseline_average.value ? "<" : ">", baseline_average.value);
QUERY_VALUE highlight_countif = rrdmetric2value(host, rca, ria, rma, after, before,
options, RRDR_GROUPING_COUNTIF, highlight_countif_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
- merge_query_value_to_stats(&highlight_countif, stats);
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
+ merge_query_value_to_stats(&highlight_countif, stats, 1);
if(!netdata_double_isnumber(highlight_countif.value)) {
info("WEIGHTS: highlighted countif query failed, but highlighted average worked - strange...");
@@ -693,31 +1467,104 @@ static void rrdset_metric_correlations_volume(
pcent = highlight_countif.value;
}
- register_result(results, rca, ria, rma, pcent, flags, stats, register_zero);
+ register_result(results, host, rca, ria, rma, pcent, flags, &highlight_average.sp, &baseline_average.sp, stats,
+ register_zero, baseline_average.duration_ut + highlight_average.duration_ut + highlight_countif.duration_ut);
}
// ----------------------------------------------------------------------------
-// ANOMALY RATE algorithm functions
+// VALUE / ANOMALY RATE algorithm functions
-static void rrdset_weights_anomaly_rate(
+static void rrdset_weights_value(
RRDHOST *host,
RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma,
DICTIONARY *results,
time_t after, time_t before,
- RRDR_OPTIONS options, RRDR_GROUPING group_method, const char *group_options,
+ RRDR_OPTIONS options, RRDR_TIME_GROUPING time_group_method, const char *time_group_options,
size_t tier,
WEIGHTS_STATS *stats, bool register_zero) {
- options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_ANOMALY_BIT | RRDR_OPTION_NATURAL_POINTS;
+ options |= RRDR_OPTION_MATCH_IDS | RRDR_OPTION_NATURAL_POINTS;
QUERY_VALUE qv = rrdmetric2value(host, rca, ria, rma, after, before,
- options, group_method, group_options, tier, 0,
- QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_NORMAL);
+ options, time_group_method, time_group_options, tier, 0,
+ QUERY_SOURCE_API_WEIGHTS, STORAGE_PRIORITY_SYNCHRONOUS);
- merge_query_value_to_stats(&qv, stats);
+ merge_query_value_to_stats(&qv, stats, 1);
if(netdata_double_isnumber(qv.value))
- register_result(results, rca, ria, rma, qv.value, 0, stats, register_zero);
+ register_result(results, host, rca, ria, rma, qv.value, 0, &qv.sp, NULL, stats, register_zero, qv.duration_ut);
+}
+
+// Run a single multi-dimensional query (points = 1) covering all metrics the
+// request selects, and register one weights result per exposed dimension.
+// This avoids issuing one database query per metric for the VALUE and
+// ANOMALY-RATE methods when a context filter is available.
+static void rrdset_weights_multi_dimensional_value(struct query_weights_data *qwd) {
+    QUERY_TARGET_REQUEST qtr = {
+            .version = 1,
+            .scope_nodes = qwd->qwr->scope_nodes,
+            .scope_contexts = qwd->qwr->scope_contexts,
+            .nodes = qwd->qwr->nodes,
+            .contexts = qwd->qwr->contexts,
+            .instances = qwd->qwr->instances,
+            .dimensions = qwd->qwr->dimensions,
+            .labels = qwd->qwr->labels,
+            .alerts = qwd->qwr->alerts,
+            .after = qwd->qwr->after,
+            .before = qwd->qwr->before,
+            .points = 1,
+            .options = qwd->qwr->options | RRDR_OPTION_NATURAL_POINTS,
+            .time_group_method = qwd->qwr->time_group_method,
+            .time_group_options = qwd->qwr->time_group_options,
+            .tier = qwd->qwr->tier,
+            .timeout_ms = qwd->qwr->timeout_ms,
+            .query_source = QUERY_SOURCE_API_WEIGHTS,
+            .priority = STORAGE_PRIORITY_NORMAL,
+    };
+
+    ONEWAYALLOC *owa = onewayalloc_create(16 * 1024);
+    QUERY_TARGET *qt = query_target_create(&qtr);
+    RRDR *r = rrd2rrdr(owa, qt);
+
+    // we asked for exactly one row; anything else means the query did not
+    // produce usable data, so bail out through the common cleanup path
+    if(!r || rrdr_rows(r) != 1 || !r->d || r->d != r->internal.qt->query.used)
+        goto cleanup;
+
+    // designated initializer zero-fills the remaining members (value, sp, ...)
+    QUERY_VALUE qv = {
+        .after = r->view.after,
+        .before = r->view.before,
+        .points_read = r->stats.db_points_read,
+        .result_points = r->stats.result_points_generated,
+    };
+
+    size_t queries = 0;
+    for(size_t d = 0; d < r->d ;d++) {
+        if(!rrdr_dimension_should_be_exposed(r->od[d], qwd->qwr->options))
+            continue;
+
+        long i = 0; // only one row
+        NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+        NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
+
+        qv.value = cn[d];
+        qv.anomaly_rate = ar[d];
+        storage_point_merge_to(qv.sp, r->internal.qt->query.array[d].query_points);
+
+        // register a result only for numeric values, resolving the full
+        // node/context/instance/dimension chain of this query slot
+        if(netdata_double_isnumber(qv.value)) {
+            QUERY_METRIC *qm = query_metric(r->internal.qt, d);
+            QUERY_DIMENSION *qd = query_dimension(r->internal.qt, qm->link.query_dimension_id);
+            QUERY_INSTANCE *qi = query_instance(r->internal.qt, qm->link.query_instance_id);
+            QUERY_CONTEXT *qc = query_context(r->internal.qt, qm->link.query_context_id);
+            QUERY_NODE *qn = query_node(r->internal.qt, qm->link.query_node_id);
+
+            register_result(qwd->results, qn->rrdhost, qc->rca, qi->ria, qd->rma, qv.value, 0, &qv.sp,
+                            NULL, &qwd->stats, qwd->register_zero, qm->duration_ut);
+        }
+
+        queries++;
+    }
+
+    // account all exposed dimensions as individual queries in the stats
+    merge_query_value_to_stats(&qv, &qwd->stats, queries);
+
+cleanup:
+    rrdr_free(owa, r);
+    query_target_release(qt);
+    onewayalloc_destroy(owa);
+}
// ----------------------------------------------------------------------------
@@ -765,13 +1612,15 @@ static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
NETDATA_DOUBLE slots[dimensions];
dimensions = 0;
dfe_start_read(results, t) {
- if(t->flags & (RESULT_IS_PERCENTAGE_OF_TIME))
+ if(t->flags & RESULT_IS_PERCENTAGE_OF_TIME)
t->value = t->value * stats->max_base_high_ratio;
slots[dimensions++] = t->value;
}
dfe_done(t);
+ if(!dimensions) return 0; // Coverity fix
+
// sort the array with the values of all dimensions
qsort(slots, dimensions, sizeof(NETDATA_DOUBLE), compare_netdata_doubles);
@@ -805,60 +1654,184 @@ static size_t spread_results_evenly(DICTIONARY *results, WEIGHTS_STATS *stats) {
// ----------------------------------------------------------------------------
// The main function
-int web_api_v1_weights(
- RRDHOST *host, BUFFER *wb, WEIGHTS_METHOD method, WEIGHTS_FORMAT format,
- RRDR_GROUPING group, const char *group_options,
- time_t baseline_after, time_t baseline_before,
- time_t after, time_t before,
- size_t points, RRDR_OPTIONS options, SIMPLE_PATTERN *contexts, size_t tier, size_t timeout) {
+// Per-metric callback: runs the weights algorithm selected by the request on
+// one metric and registers the result. Returns 1 to continue the iteration,
+// or -1 to abort it (caller interrupt or timeout).
+static ssize_t weights_for_rrdmetric(void *data, RRDHOST *host, RRDCONTEXT_ACQUIRED *rca, RRDINSTANCE_ACQUIRED *ria, RRDMETRIC_ACQUIRED *rma) {
+    struct query_weights_data *qwd = data;
+    QUERY_WEIGHTS_REQUEST *qwr = qwd->qwr;
+
+    // give the caller a chance to cancel a long-running request
+    if(qwd->qwr->interrupt_callback && qwd->qwr->interrupt_callback(qwd->qwr->interrupt_callback_data)) {
+        qwd->interrupted = true;
+        return -1;
+    }
+
+    qwd->examined_dimensions++;
+
+    switch(qwr->method) {
+        case WEIGHTS_METHOD_VALUE:
+            rrdset_weights_value(
+                    host, rca, ria, rma,
+                    qwd->results,
+                    qwr->after, qwr->before,
+                    qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier,
+                    &qwd->stats, qwd->register_zero
+            );
+            break;
+
+        case WEIGHTS_METHOD_ANOMALY_RATE:
+            // anomaly rate is the VALUE method reading the anomaly bit instead
+            // of the samples; note this flag is set on the shared request, so
+            // it stays set for all subsequent metrics of this request
+            qwr->options |= RRDR_OPTION_ANOMALY_BIT;
+            rrdset_weights_value(
+                    host, rca, ria, rma,
+                    qwd->results,
+                    qwr->after, qwr->before,
+                    qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier,
+                    &qwd->stats, qwd->register_zero
+            );
+            break;
+
+        case WEIGHTS_METHOD_MC_VOLUME:
+            rrdset_metric_correlations_volume(
+                    host, rca, ria, rma,
+                    qwd->results,
+                    qwr->baseline_after, qwr->baseline_before,
+                    qwr->after, qwr->before,
+                    qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier,
+                    &qwd->stats, qwd->register_zero
+            );
+            break;
+
+        default:
+        case WEIGHTS_METHOD_MC_KS2:
+            rrdset_metric_correlations_ks2(
+                    host, rca, ria, rma,
+                    qwd->results,
+                    qwr->baseline_after, qwr->baseline_before,
+                    qwr->after, qwr->before, qwr->points,
+                    qwr->options, qwr->time_group_method, qwr->time_group_options, qwr->tier, qwd->shifts,
+                    &qwd->stats, qwd->register_zero
+            );
+            break;
+    }
+
+    // timeout is checked after each metric, not inside the algorithms
+    qwd->timings.executed_ut = now_monotonic_usec();
+    if(qwd->timings.executed_ut - qwd->timings.received_ut > qwd->timeout_us) {
+        qwd->timed_out = true;
+        return -1;
+    }
+
+    return 1;
+}
+
+// Per-context callback: verifies the context has retention covering the
+// requested window(s), then iterates its metrics with weights_for_rrdmetric().
+// Returns the (possibly negative, on abort) count of metrics processed,
+// or 0 when the context is skipped.
+static ssize_t weights_do_context_callback(void *data, RRDCONTEXT_ACQUIRED *rca, bool queryable_context) {
+    if(!queryable_context)
+        return 0; // fixed: was `return false;` - the return type is ssize_t, not bool
+
+    struct query_weights_data *qwd = data;
+
+    // the correlation methods also need retention for the baseline window
+    bool has_retention = false;
+    switch(qwd->qwr->method) {
+        case WEIGHTS_METHOD_VALUE:
+        case WEIGHTS_METHOD_ANOMALY_RATE:
+            has_retention = rrdcontext_retention_match(rca, qwd->qwr->after, qwd->qwr->before);
+            break;
+
+        case WEIGHTS_METHOD_MC_KS2:
+        case WEIGHTS_METHOD_MC_VOLUME:
+            has_retention = rrdcontext_retention_match(rca, qwd->qwr->after, qwd->qwr->before);
+            if(has_retention)
+                has_retention = rrdcontext_retention_match(rca, qwd->qwr->baseline_after, qwd->qwr->baseline_before);
+            break;
+    }
+
+    if(!has_retention)
+        return 0;
+
+    ssize_t ret = weights_foreach_rrdmetric_in_context(rca,
+                                                       qwd->instances_sp,
+                                                       NULL,
+                                                       qwd->labels_sp,
+                                                       qwd->alerts_sp,
+                                                       qwd->dimensions_sp,
+                                                       true, true, qwd->qwr->version,
+                                                       weights_for_rrdmetric, qwd);
+    return ret;
+}
+
+// Per-node callback for query_scope_foreach_host(): fans out to every
+// matching context of the host via weights_do_context_callback().
+// NOTE(review): unlike the sibling callbacks above, this one is not declared
+// static - confirm whether external linkage is actually needed.
+ssize_t weights_do_node_callback(void *data, RRDHOST *host, bool queryable) {
+    if(!queryable)
+        return 0;
-    WEIGHTS_STATS stats = {};
+    struct query_weights_data *qwd = data;
+
+    ssize_t ret = query_scope_foreach_context(host, qwd->qwr->scope_contexts,
+                                              qwd->scope_contexts_sp, qwd->contexts_sp,
+                                              weights_do_context_callback, queryable, qwd);
+
+    return ret;
+}
+
+int web_api_v12_weights(BUFFER *wb, QUERY_WEIGHTS_REQUEST *qwr) {
- DICTIONARY *results = register_result_init();
- DICTIONARY *metrics = NULL;
char *error = NULL;
int resp = HTTP_RESP_OK;
// if the user didn't give a timeout
// assume 60 seconds
- if(!timeout)
- timeout = 60 * MSEC_PER_SEC;
+ if(!qwr->timeout_ms)
+ qwr->timeout_ms = 5 * 60 * MSEC_PER_SEC;
// if the timeout is less than 1 second
// make it at least 1 second
- if(timeout < (long)(1 * MSEC_PER_SEC))
- timeout = 1 * MSEC_PER_SEC;
-
- usec_t timeout_usec = timeout * USEC_PER_MS;
- usec_t started_usec = now_realtime_usec();
+ if(qwr->timeout_ms < (long)(1 * MSEC_PER_SEC))
+ qwr->timeout_ms = 1 * MSEC_PER_SEC;
+
+ struct query_weights_data qwd = {
+ .qwr = qwr,
+
+ .scope_nodes_sp = string_to_simple_pattern(qwr->scope_nodes),
+ .scope_contexts_sp = string_to_simple_pattern(qwr->scope_contexts),
+ .nodes_sp = string_to_simple_pattern(qwr->nodes),
+ .contexts_sp = string_to_simple_pattern(qwr->contexts),
+ .instances_sp = string_to_simple_pattern(qwr->instances),
+ .dimensions_sp = string_to_simple_pattern(qwr->dimensions),
+ .labels_sp = string_to_simple_pattern(qwr->labels),
+ .alerts_sp = string_to_simple_pattern(qwr->alerts),
+ .timeout_us = qwr->timeout_ms * USEC_PER_MS,
+ .timed_out = false,
+ .examined_dimensions = 0,
+ .register_zero = true,
+ .results = register_result_init(),
+ .stats = {},
+ .shifts = 0,
+ .timings = {
+ .received_ut = now_monotonic_usec(),
+ }
+ };
- if(!rrdr_relative_window_to_absolute(&after, &before))
+ if(!rrdr_relative_window_to_absolute(&qwr->after, &qwr->before, NULL))
buffer_no_cacheable(wb);
- if (before <= after) {
+ if (qwr->before <= qwr->after) {
resp = HTTP_RESP_BAD_REQUEST;
error = "Invalid selected time-range.";
goto cleanup;
}
- uint32_t shifts = 0;
- if(method == WEIGHTS_METHOD_MC_KS2 || method == WEIGHTS_METHOD_MC_VOLUME) {
- if(!points) points = 500;
+ if(qwr->method == WEIGHTS_METHOD_MC_KS2 || qwr->method == WEIGHTS_METHOD_MC_VOLUME) {
+ if(!qwr->points) qwr->points = 500;
- if(baseline_before <= API_RELATIVE_TIME_MAX)
- baseline_before += after;
+ if(qwr->baseline_before <= API_RELATIVE_TIME_MAX)
+ qwr->baseline_before += qwr->after;
- rrdr_relative_window_to_absolute(&baseline_after, &baseline_before);
+ rrdr_relative_window_to_absolute(&qwr->baseline_after, &qwr->baseline_before, NULL);
- if (baseline_before <= baseline_after) {
+ if (qwr->baseline_before <= qwr->baseline_after) {
resp = HTTP_RESP_BAD_REQUEST;
error = "Invalid baseline time-range.";
goto cleanup;
}
// baseline should be a power of two multiple of highlight
- long long base_delta = baseline_before - baseline_after;
- long long high_delta = before - after;
+ long long base_delta = qwr->baseline_before - qwr->baseline_after;
+ long long high_delta = qwr->before - qwr->after;
uint32_t multiplier = (uint32_t)round((double)base_delta / (double)high_delta);
// check if the multiplier is a power of two
@@ -880,138 +1853,146 @@ int web_api_v1_weights(
// we need to do, to divide baseline numbers to match
// the highlight ones
while(multiplier > 1) {
- shifts++;
+ qwd.shifts++;
multiplier = multiplier >> 1;
}
// if the baseline size will not comply to MAX_POINTS
// lower the window of the baseline
- while(shifts && (points << shifts) > MAX_POINTS)
- shifts--;
+ while(qwd.shifts && (qwr->points << qwd.shifts) > MAX_POINTS)
+ qwd.shifts--;
// if the baseline size still does not comply to MAX_POINTS
// lower the resolution of the highlight and the baseline
- while((points << shifts) > MAX_POINTS)
- points = points >> 1;
+ while((qwr->points << qwd.shifts) > MAX_POINTS)
+ qwr->points = qwr->points >> 1;
- if(points < 15) {
+ if(qwr->points < 15) {
resp = HTTP_RESP_BAD_REQUEST;
error = "Too few points available, at least 15 are needed.";
goto cleanup;
}
// adjust the baseline to be multiplier times bigger than the highlight
- baseline_after = baseline_before - (high_delta << shifts);
+ qwr->baseline_after = qwr->baseline_before - (high_delta << qwd.shifts);
}
- size_t examined_dimensions = 0;
+ if(qwr->options & RRDR_OPTION_NONZERO) {
+ qwd.register_zero = false;
- bool register_zero = true;
- if(options & RRDR_OPTION_NONZERO) {
- register_zero = false;
- options &= ~RRDR_OPTION_NONZERO;
+ // remove it to run the queries without it
+ qwr->options &= ~RRDR_OPTION_NONZERO;
}
- metrics = rrdcontext_all_metrics_to_dict(host, contexts);
- struct metric_entry *me;
-
- // for every metric_entry in the dictionary
- dfe_start_read(metrics, me) {
- usec_t now_usec = now_realtime_usec();
- if(now_usec - started_usec > timeout_usec) {
- error = "timed out";
- resp = HTTP_RESP_GATEWAY_TIMEOUT;
- goto cleanup;
+ if(qwr->host && qwr->version == 1)
+ weights_do_node_callback(&qwd, qwr->host, true);
+ else {
+ if((qwd.qwr->method == WEIGHTS_METHOD_VALUE || qwd.qwr->method == WEIGHTS_METHOD_ANOMALY_RATE) && (qwd.contexts_sp || qwd.scope_contexts_sp)) {
+ rrdset_weights_multi_dimensional_value(&qwd);
}
-
- examined_dimensions++;
-
- switch(method) {
- case WEIGHTS_METHOD_ANOMALY_RATE:
- options |= RRDR_OPTION_ANOMALY_BIT;
- rrdset_weights_anomaly_rate(
- host,
- me->rca, me->ria, me->rma,
- results,
- after, before,
- options, group, group_options, tier,
- &stats, register_zero
- );
- break;
-
- case WEIGHTS_METHOD_MC_VOLUME:
- rrdset_metric_correlations_volume(
- host,
- me->rca, me->ria, me->rma,
- results,
- baseline_after, baseline_before,
- after, before,
- options, group, group_options, tier,
- &stats, register_zero
- );
- break;
-
- default:
- case WEIGHTS_METHOD_MC_KS2:
- rrdset_metric_correlations_ks2(
- host,
- me->rca, me->ria, me->rma,
- results,
- baseline_after, baseline_before,
- after, before, points,
- options, group, group_options, tier, shifts,
- &stats, register_zero
- );
- break;
+ else {
+ query_scope_foreach_host(qwd.scope_nodes_sp, qwd.nodes_sp,
+ weights_do_node_callback, &qwd,
+ &qwd.versions,
+ NULL);
}
}
- dfe_done(me);
- if(!register_zero)
- options |= RRDR_OPTION_NONZERO;
+ if(!qwd.register_zero) {
+ // put it back, to show it in the response
+ qwr->options |= RRDR_OPTION_NONZERO;
+ }
+
+ if(qwd.timed_out) {
+ error = "timed out";
+ resp = HTTP_RESP_GATEWAY_TIMEOUT;
+ goto cleanup;
+ }
+
+ if(qwd.interrupted) {
+ error = "interrupted";
+ resp = HTTP_RESP_BACKEND_FETCH_FAILED;
+ goto cleanup;
+ }
+
+ if(!qwd.register_zero)
+ qwr->options |= RRDR_OPTION_NONZERO;
- if(!(options & RRDR_OPTION_RETURN_RAW))
- spread_results_evenly(results, &stats);
+ if(!(qwr->options & RRDR_OPTION_RETURN_RAW) && qwr->method != WEIGHTS_METHOD_VALUE)
+ spread_results_evenly(qwd.results, &qwd.stats);
- usec_t ended_usec = now_realtime_usec();
+ usec_t ended_usec = qwd.timings.executed_ut = now_monotonic_usec();
// generate the json output we need
buffer_flush(wb);
size_t added_dimensions = 0;
- switch(format) {
+ switch(qwr->format) {
case WEIGHTS_FORMAT_CHARTS:
added_dimensions =
registered_results_to_json_charts(
- results, wb,
- after, before,
- baseline_after, baseline_before,
- points, method, group, options, shifts,
- examined_dimensions,
- ended_usec - started_usec, &stats);
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ ended_usec - qwd.timings.received_ut, &qwd.stats);
break;
- default:
case WEIGHTS_FORMAT_CONTEXTS:
added_dimensions =
registered_results_to_json_contexts(
- results, wb,
- after, before,
- baseline_after, baseline_before,
- points, method, group, options, shifts,
- examined_dimensions,
- ended_usec - started_usec, &stats);
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ ended_usec - qwd.timings.received_ut, &qwd.stats);
+ break;
+
+ default:
+ case WEIGHTS_FORMAT_MULTINODE:
+ // we don't support these groupings in weights
+ qwr->group_by.group_by &= ~(RRDR_GROUP_BY_LABEL|RRDR_GROUP_BY_SELECTED|RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE);
+ if(qwr->group_by.group_by == RRDR_GROUP_BY_NONE) {
+ added_dimensions =
+ registered_results_to_json_multinode_no_group_by(
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ &qwd, &qwd.stats, &qwd.versions);
+ }
+ else {
+ added_dimensions =
+ registered_results_to_json_multinode_group_by(
+ qwd.results, wb,
+ qwr->after, qwr->before,
+ qwr->baseline_after, qwr->baseline_before,
+ qwr->points, qwr->method, qwr->time_group_method, qwr->options, qwd.shifts,
+ qwd.examined_dimensions,
+ &qwd, &qwd.stats, &qwd.versions);
+ }
break;
}
- if(!added_dimensions) {
+ if(!added_dimensions && qwr->version < 2) {
error = "no results produced.";
resp = HTTP_RESP_NOT_FOUND;
}
cleanup:
- if(metrics) dictionary_destroy(metrics);
- if(results) register_result_destroy(results);
+ simple_pattern_free(qwd.scope_nodes_sp);
+ simple_pattern_free(qwd.scope_contexts_sp);
+ simple_pattern_free(qwd.nodes_sp);
+ simple_pattern_free(qwd.contexts_sp);
+ simple_pattern_free(qwd.instances_sp);
+ simple_pattern_free(qwd.dimensions_sp);
+ simple_pattern_free(qwd.labels_sp);
+ simple_pattern_free(qwd.alerts_sp);
+
+ register_result_destroy(qwd.results);
if(error) {
buffer_flush(wb);